diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..8aa590c
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,64 @@
+name: CI
+
+on:
+ push:
+ branches: ['**']
+ pull_request:
+
+concurrency:
+ group: ci-${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ test:
+ name: Jest (Node ${{ matrix.node }})
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ node: ['20', '22']
+
+ env:
+ CI: 'true'
+ # Keep tests hermetic: anything that auto-opts-in based on
+ # NODE_ENV stays in test mode, not dev/prod.
+ NODE_ENV: test
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Node ${{ matrix.node }}
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ matrix.node }}
+ cache: npm
+
+ - name: Install
+ # `npm ci` is strict about package-lock.json; if the lock
+ # drifts we want the CI to fail loudly rather than silently
+ # resolve a new tree.
+ run: npm ci
+
+ - name: Run tests
+ run: npm test -- --runInBand --ci --colors
+
+ lint-sql:
+ name: Schema sanity
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Verify migrations directory has a deterministic ordering
+ # Guard against accidentally shipping two migrations with
+ # colliding numeric prefixes — the runner sorts lexicographically
+ # and duplicate prefixes would make apply order undefined.
+ run: |
+ set -euo pipefail
+ cd db/migrations
+ dup=$(ls | awk -F'_' '{print $1}' | sort | uniq -d || true)
+ if [ -n "$dup" ]; then
+ echo "Duplicate migration prefix(es): $dup"
+ exit 1
+ fi
diff --git a/db/migrations/001_init.sql b/db/migrations/001_init.sql
index 4f853e0..6340350 100644
--- a/db/migrations/001_init.sql
+++ b/db/migrations/001_init.sql
@@ -67,6 +67,41 @@
-- short-lived under Core's time window, and keeping them server-side
-- expands attack surface with zero replay value; a retry regenerates
-- a fresh sig client-side from the vault.
+--
+-- proposal_drafts
+-- User's in-progress proposal text. Server-side so the same draft
+-- is available across devices once the user is logged in (Twitter-
+-- compose-style: log out / close / switch devices and the drafts
+-- follow the account). Drafts are plaintext because a governance
+-- proposal's content is, by definition, about to go public on
+-- chain — encrypting it would add friction for zero security
+-- benefit. The payment_amount_sats is stored in satoshis as INTEGER
+-- (fits in int64 for every imaginable proposal size) to avoid the
+-- float-precision traps of storing SYS decimals.
+--
+-- proposal_submissions
+-- One row per proposal the user has actually committed to publishing
+-- (i.e. they've advanced past the draft step). The row is created at
+-- "prepare" time with a frozen canonical snapshot (parent_hash +
+-- revision + time_unix + data_hex + proposal_hash) — those fields
+-- are the hash preimage and must not change after this point, else
+-- the 150 SYS collateral OP_RETURN would stop matching. The row
+-- moves through a small state machine advanced partly by the user
+-- (reporting a collateral txid) and partly by the reminder-style
+-- dispatcher (watching confirmations, calling gobject_submit once
+-- mature). Statuses:
+-- prepared hash + dataHex computed, shown to user,
+-- no collateral yet.
+-- awaiting_collateral user has supplied a collateral txid;
+-- dispatcher is polling confirmations.
+-- submitted gobject_submit succeeded; governance_hash
+-- is set. Terminal (happy path).
+-- failed something fatal happened; fail_reason
+-- is a stable machine code, fail_detail is
+-- raw context. Terminal.
+-- There is no 'abandoned' status — users who back out before paying
+-- just DELETE their row. The status column has no CHECK constraint
+-- so the repo layer owns validation (mirroring vote_receipts.status).
CREATE TABLE users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -191,3 +226,108 @@ CREATE INDEX idx_receipts_user_proposal
ON vote_receipts(user_id, proposal_hash);
CREATE INDEX idx_receipts_user_recent
ON vote_receipts(user_id, submitted_at DESC);
+
+-- proposal_drafts: user's in-progress proposal content. No canonical
+-- snapshot or hash here — drafts haven't committed to an on-chain
+-- identity yet. `payment_amount_sats` is an integer number of
+-- satoshis (int64 range easily accommodates any realistic amount);
+-- storing SYS as a decimal REAL would drift under float arithmetic.
+-- `start_epoch` / `end_epoch` are nullable because a user may save
+-- before choosing a superblock.
+CREATE TABLE proposal_drafts (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ title TEXT NOT NULL DEFAULT '',
+ name TEXT NOT NULL DEFAULT '',
+ url TEXT NOT NULL DEFAULT '',
+ description TEXT NOT NULL DEFAULT '',
+ payment_address TEXT NOT NULL DEFAULT '',
+ payment_amount_sats INTEGER NOT NULL DEFAULT 0,
+ payment_count INTEGER NOT NULL DEFAULT 1,
+ start_epoch INTEGER,
+ end_epoch INTEGER,
+ created_at INTEGER NOT NULL,
+ updated_at INTEGER NOT NULL
+);
+
+CREATE INDEX idx_proposal_drafts_user_recent
+ ON proposal_drafts(user_id, updated_at DESC);
+
+-- proposal_submissions: once the user commits to publishing, we
+-- snapshot the canonical (parent_hash, revision, time_unix, data_hex,
+-- proposal_hash) tuple. Anything derived from data_hex (name, url,
+-- payment_*) is duplicated in typed columns for indexing and display,
+-- but the source of truth for what the chain sees is data_hex — the
+-- repo layer guarantees the denormalized columns stay in sync with
+-- it. draft_id is intentionally ON DELETE SET NULL so a user can
+-- clean up their drafts list without destroying the historical
+-- record of what they submitted.
+CREATE TABLE proposal_submissions (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ draft_id INTEGER REFERENCES proposal_drafts(id) ON DELETE SET NULL,
+
+ parent_hash TEXT NOT NULL DEFAULT '0',
+ revision INTEGER NOT NULL DEFAULT 1,
+ time_unix INTEGER NOT NULL,
+ data_hex TEXT NOT NULL,
+ proposal_hash TEXT NOT NULL,
+
+ title TEXT NOT NULL DEFAULT '',
+ name TEXT NOT NULL,
+ url TEXT NOT NULL,
+ payment_address TEXT NOT NULL,
+ payment_amount_sats INTEGER NOT NULL,
+ payment_count INTEGER NOT NULL DEFAULT 1,
+ start_epoch INTEGER NOT NULL,
+ end_epoch INTEGER NOT NULL,
+
+ status TEXT NOT NULL,
+ collateral_txid TEXT,
+ collateral_confs INTEGER NOT NULL DEFAULT 0,
+ governance_hash TEXT,
+ fail_reason TEXT,
+ fail_detail TEXT,
+
+ created_at INTEGER NOT NULL,
+ updated_at INTEGER NOT NULL
+);
+
+-- Per-user recency index (for the "your submissions" page).
+CREATE INDEX idx_proposal_submissions_user_recent
+ ON proposal_submissions(user_id, updated_at DESC);
+
+-- Dispatcher-facing index: the watcher tick scans rows by status to
+-- advance them, so keep that lookup fast regardless of table size.
+CREATE INDEX idx_proposal_submissions_status
+ ON proposal_submissions(status, updated_at);
+
+-- Partial uniqueness on collateral_txid: a given collateral tx can
+-- only back a single proposal submission. Two rows claiming the same
+-- txid is a bug (probably a duplicate "I paid, here's the txid" call
+-- from the user). NULL txids are exempt, which is the correct
+-- treatment for rows still in `prepared` state.
+CREATE UNIQUE INDEX idx_proposal_submissions_collateral_txid
+ ON proposal_submissions(collateral_txid)
+ WHERE collateral_txid IS NOT NULL;
+
+-- Codex PR8 round 3 P2: enforce /prepare idempotency at the DB layer.
+-- The route reads by (user_id, data_hex, status='prepared') and then
+-- inserts; without this partial unique index, two concurrent requests
+-- with identical payload can both miss the read and both insert,
+-- producing duplicate `prepared` rows for the same logical proposal.
+-- Once the row moves past `prepared` (the user attaches collateral,
+-- or it ends up `submitted`/`failed`), the partial predicate no
+-- longer matches and a subsequent retry with the same dataHex is
+-- free to create a fresh `prepared` row — which is the correct UX:
+-- the old submission is locked to a specific collateral txid, and a
+-- re-prepare is the user explicitly asking for a clean second take.
+CREATE UNIQUE INDEX idx_proposal_submissions_user_payload_prepared
+ ON proposal_submissions(user_id, data_hex)
+ WHERE status = 'prepared';
+
+-- Governance hash is likewise unique once set — it IS the proposal's
+-- on-chain identity. A NULL is expected for rows not yet submitted.
+CREATE UNIQUE INDEX idx_proposal_submissions_governance_hash
+ ON proposal_submissions(governance_hash)
+ WHERE governance_hash IS NOT NULL;
diff --git a/lib/appFactory.js b/lib/appFactory.js
index b2f0968..3451317 100644
--- a/lib/appFactory.js
+++ b/lib/appFactory.js
@@ -11,12 +11,16 @@ const {
} = require('./pendingRegistrations');
const { createVaultsRepo } = require('./vaults');
const { createVoteReceiptsRepo } = require('./voteReceipts');
+const { createProposalDraftsRepo } = require('./proposalDrafts');
+const { createProposalSubmissionsRepo } = require('./proposalSubmissions');
+const { createProposalDispatcher } = require('./proposalDispatcher');
const { createSessionMiddleware } = require('../middleware/session');
const { createCsrfMiddleware } = require('../middleware/csrf');
const rateLimiters = require('../middleware/rateLimit');
const { createAuthRouter } = require('../routes/auth');
const { createVaultRouter } = require('../routes/vault');
const { createGovRouter } = require('../routes/gov');
+const { createGovProposalsRouter } = require('../routes/govProposals');
// Build the stateful services (repos + middlewares) around a DB handle.
// Pure-ish: no Express side effects yet, so the same object graph can be
@@ -35,6 +39,8 @@ function buildServices({
pendingRegistrations: createPendingRegistrationsRepo(db, { now }),
vaults: createVaultsRepo(db, { now }),
voteReceipts: createVoteReceiptsRepo(db, { now }),
+ proposalDrafts: createProposalDraftsRepo(db, { now }),
+ proposalSubmissions: createProposalSubmissionsRepo(db, { now }),
sessionMw: null, // finalized once we know `users`
csrfMw: createCsrfMiddleware({ secureCookies }),
secureCookies,
@@ -78,6 +84,16 @@ function mountAuthAndVault(
voteRaw,
getCurrentVotes,
invalidateCurrentVotes,
+ // Optional: Syscoin RPC adapter for governance writes.
+ // Expected shape (camelCase on purpose — see
+ // lib/proposalDispatcher.js for the full rationale; callers
+ // wrap @syscoin/syscoin-js's snake_case methods):
+ // getRawTransaction(txid, verbose?) -> tx-json / throws
+ // gObjectSubmit(parentHash, rev, t, dataHex, txid) -> hash
+ // gObjectCheck(parentHash, rev, t, dataHex) -> ok|detail
+ // Only the subset actually consumed by each route/dispatcher is
+ // used, so test wiring can mock sparsely.
+ proposalRpc = null,
}
) {
if (!services.sessionMw) finalizeSessionMw(services);
@@ -145,6 +161,28 @@ function mountAuthAndVault(
);
}
+ // /gov/proposals is a separate, self-contained router. Unlike /gov
+ // it does NOT depend on masternodesProvider or voteRaw; it only
+ // needs the proposal repos (already in `services`) and — optionally
+ // — a `proposalRpc` with gObjectCheck for the prepare-time
+ // pre-flight. If no RPC is wired, the route degrades to "skip the
+ // pre-flight" silently (see routes/govProposals.js). Mount it
+ // unconditionally because users can still manage drafts offline.
+ if (services.proposalDrafts && services.proposalSubmissions) {
+ app.use(
+ '/gov/proposals',
+ createGovProposalsRouter({
+ drafts: services.proposalDrafts,
+ submissions: services.proposalSubmissions,
+ sessionMw: services.sessionMw,
+ csrfMw: services.csrfMw,
+ rpc: proposalRpc,
+ runAtomic: services.runAtomic,
+ now,
+ })
+ );
+ }
+
// Last-chance error-handling middleware for /auth and /vault.
//
// Express 4 does not automatically route rejected async-handler
@@ -196,6 +234,18 @@ function createApp({
voteRaw,
getCurrentVotes,
invalidateCurrentVotes,
+ proposalRpc = null,
+ // If true and a proposalRpc is wired, start the background
+ // proposal dispatcher interval. Production: true. Tests: false
+ // (they prefer to advance the dispatcher manually by calling
+ // `tick()` so they can observe each transition deterministically).
+ startProposalDispatcher = false,
+ // Interval between dispatcher ticks when startProposalDispatcher
+ // is true. Default 60s — slow enough to be polite to the RPC node
+ // (n submissions * getRawTransaction per tick) but fast enough
+ // that the 6-conf threshold is reached within ~1-2 blocks past
+ // the real confirmation.
+ proposalDispatcherIntervalMs = 60_000,
} = {}) {
if (!db) throw new Error('appFactory: db is required');
if (!mailer) throw new Error('appFactory: mailer is required');
@@ -221,10 +271,124 @@ function createApp({
voteRaw,
getCurrentVotes,
invalidateCurrentVotes,
+ proposalRpc,
});
app.get('/health', (_req, res) => res.json({ ok: true }));
+ // Optional: dispatcher lifecycle. We build the dispatcher even when
+ // the interval is off so tests can drive it by calling `tick()`
+ // directly via the returned handle. The onSubmitted / onFailed
+ // hooks resolve the submission's user email and dispatch the
+ // corresponding mailer method. A missing user record (ghost submission
+ // after a hard account-delete) short-circuits without throwing —
+ // these hooks are best-effort by design (dispatcher.js catches
+ // everything).
+ let dispatcher = null;
+ let dispatcherTimer = null;
+ // Codex PR8 round 8 P2: `stopProposalDispatcher` must prevent a
+ // tick that is already *in flight* (i.e. awaiting
+ // `dispatcher.tick()`) from re-arming the loop after it resolves.
+ // clearTimeout() only cancels a *pending* timer — it can't abort
+ // an async function that's already past the `await`. This flag
+ // is checked right before scheduling the next setTimeout so a
+ // late-arriving tick becomes a silent no-op instead of
+ // resurrecting the polling loop (which would keep hitting
+ // services and the RPC node on a process that's trying to shut
+ // down cleanly, and leak the timer into post-teardown tests).
+ let dispatcherStopped = false;
+ if (proposalRpc && typeof proposalRpc.getRawTransaction === 'function') {
+ async function mailOnStateChange(kind, submission) {
+ // Lookup is read-only; users.findById returns null for a
+ // deleted account, in which case we simply drop the email.
+ let user;
+ try {
+ user = services.users.findById(submission.userId);
+ } catch (_err) {
+ return;
+ }
+ if (!user || !user.email) return;
+ const common = {
+ to: user.email,
+ proposalName: submission.name,
+ submissionId: submission.id,
+ };
+ if (kind === 'submitted') {
+ await mailer.sendProposalSubmitted({
+ ...common,
+ governanceHash: submission.governanceHash,
+ collateralTxid: submission.collateralTxid,
+ });
+ } else {
+ await mailer.sendProposalFailed({
+ ...common,
+ failReason: submission.failReason,
+ failDetail: submission.failDetail,
+ });
+ }
+ }
+
+ dispatcher = createProposalDispatcher({
+ submissions: services.proposalSubmissions,
+ rpc: proposalRpc,
+ now,
+ onSubmitted: ({ submission }) =>
+ mailOnStateChange('submitted', submission),
+ onFailed: ({ submission }) => mailOnStateChange('failed', submission),
+ });
+
+ if (startProposalDispatcher) {
+ // Defensive reset: the flag is scoped to this createApp call and
+ // was initialized false above, so this is a no-op today; kept so a
+ // future refactor that hoists the flag cannot start pre-stopped.
+ dispatcherStopped = false;
+ // Stagger the first run a few seconds after boot so we don't
+ // hammer the RPC node at the same instant as every other
+ // scheduled task (reminder dispatcher, etc).
+ const kickoff = setTimeout(() => {
+ // eslint-disable-next-line no-inner-declarations
+ async function fireAndSchedule() {
+ // Codex PR8 round 8 P2: if stopProposalDispatcher() was
+ // called after the previous setTimeout fired but before
+ // this callback started running, the timer handle was
+ // already consumed by Node and clearTimeout() was a
+ // no-op. Bail out here so we don't hit services during
+ // teardown.
+ if (dispatcherStopped) return;
+ try {
+ await dispatcher.tick();
+ } catch (err) {
+ // Dispatcher swallows per-row errors internally; any
+ // throw out here is an invariant violation worth logging.
+ // eslint-disable-next-line no-console
+ console.error('[proposalDispatcher] tick crashed', err);
+ }
+ // Re-check AFTER the await: stopProposalDispatcher may
+ // have been called while the tick was in flight (common
+ // in graceful-shutdown paths and in Jest teardown). If
+ // it was, do NOT re-arm — otherwise the loop effectively
+ // ignores the stop signal and keeps polling until
+ // process exit.
+ if (dispatcherStopped) return;
+ dispatcherTimer = setTimeout(
+ fireAndSchedule,
+ proposalDispatcherIntervalMs
+ );
+ }
+ fireAndSchedule();
+ }, Math.min(5000, proposalDispatcherIntervalMs));
+ dispatcherTimer = kickoff;
+ }
+ }
+
+ function stopProposalDispatcher() {
+ dispatcherStopped = true;
+ if (dispatcherTimer) {
+ clearTimeout(dispatcherTimer);
+ dispatcherTimer = null;
+ }
+ }
+
return {
app,
users: services.users,
@@ -233,6 +397,10 @@ function createApp({
pendingRegistrations: services.pendingRegistrations,
vaults: services.vaults,
voteReceipts: services.voteReceipts,
+ proposalDrafts: services.proposalDrafts,
+ proposalSubmissions: services.proposalSubmissions,
+ proposalDispatcher: dispatcher,
+ stopProposalDispatcher,
sessionMw: services.sessionMw,
csrfMw: services.csrfMw,
runAtomic: services.runAtomic,
diff --git a/lib/db.test.js b/lib/db.test.js
index 6e4f785..e62e07b 100644
--- a/lib/db.test.js
+++ b/lib/db.test.js
@@ -18,6 +18,8 @@ describe('db.openDatabase', () => {
expect.arrayContaining([
'email_verifications',
'pending_registrations',
+ 'proposal_drafts',
+ 'proposal_submissions',
'sessions',
'tracked_masternodes',
'users',
@@ -142,6 +144,150 @@ describe('db.openDatabase', () => {
db.close();
});
+ test('proposal_drafts cascade-delete when user is removed', () => {
+ const db = openDatabase(':memory:');
+ const now = Date.now();
+ const r = db
+ .prepare(
+ 'INSERT INTO users (email, stored_auth, salt_v, created_at, updated_at) VALUES (?, ?, ?, ?, ?)'
+ )
+ .run('a@b.com', 'hash', FAKE_SALT_V, now, now);
+ const uid = r.lastInsertRowid;
+ db.prepare(
+ 'INSERT INTO proposal_drafts (user_id, created_at, updated_at) VALUES (?, ?, ?)'
+ ).run(uid, now, now);
+ expect(db.prepare('SELECT COUNT(*) AS c FROM proposal_drafts').get().c).toBe(1);
+ db.prepare('DELETE FROM users WHERE id = ?').run(uid);
+ expect(db.prepare('SELECT COUNT(*) AS c FROM proposal_drafts').get().c).toBe(0);
+ db.close();
+ });
+
+ test('proposal_submissions enforce unique collateral_txid when set, allow multiple NULLs', () => {
+ const db = openDatabase(':memory:');
+ const now = Date.now();
+ const r = db
+ .prepare(
+ 'INSERT INTO users (email, stored_auth, salt_v, created_at, updated_at) VALUES (?, ?, ?, ?, ?)'
+ )
+ .run('a@b.com', 'hash', FAKE_SALT_V, now, now);
+ const uid = r.lastInsertRowid;
+ const baseCols = [
+ 'user_id',
+ 'time_unix',
+ 'data_hex',
+ 'proposal_hash',
+ 'name',
+ 'url',
+ 'payment_address',
+ 'payment_amount_sats',
+ 'start_epoch',
+ 'end_epoch',
+ 'status',
+ 'collateral_txid',
+ 'created_at',
+ 'updated_at',
+ ];
+ const placeholders = baseCols.map(() => '?').join(',');
+ const ins = db.prepare(
+ `INSERT INTO proposal_submissions (${baseCols.join(',')}) VALUES (${placeholders})`
+ );
+ // data_hex varies per row because there is also a partial unique
+ // index `(user_id, data_hex) WHERE status='prepared'` that guards
+ // the /prepare idempotency contract (Codex PR8 round 3 P2). This
+ // test cares only about the collateral_txid uniqueness; using a
+ // distinct data_hex per row keeps the two unrelated invariants
+ // decoupled.
+ const row = (hash, txid, dataHex) => [
+ uid,
+ 1700000000,
+ dataHex,
+ hash,
+ 'n',
+ 'u',
+ 'a',
+ 1,
+ 1700000000,
+ 1800000000,
+ 'prepared',
+ txid,
+ now,
+ now,
+ ];
+ // Two rows with NULL txid are fine (both still in 'prepared').
+ ins.run(...row('h1'.padEnd(64, '0'), null, 'de01'));
+ ins.run(...row('h2'.padEnd(64, '0'), null, 'de02'));
+ // A non-null txid is unique.
+ ins.run(...row('h3'.padEnd(64, '0'), 'abc123', 'de03'));
+ expect(() =>
+ ins.run(...row('h4'.padEnd(64, '0'), 'abc123', 'de04'))
+ ).toThrow(/UNIQUE/i);
+ db.close();
+ });
+
+ test('proposal_submissions governance_hash is unique when set', () => {
+ const db = openDatabase(':memory:');
+ const now = Date.now();
+ const r = db
+ .prepare(
+ 'INSERT INTO users (email, stored_auth, salt_v, created_at, updated_at) VALUES (?, ?, ?, ?, ?)'
+ )
+ .run('a@b.com', 'hash', FAKE_SALT_V, now, now);
+ const uid = r.lastInsertRowid;
+ const cols =
+ 'user_id,time_unix,data_hex,proposal_hash,name,url,payment_address,payment_amount_sats,start_epoch,end_epoch,status,governance_hash,created_at,updated_at';
+ const ins = db.prepare(
+ `INSERT INTO proposal_submissions (${cols}) VALUES (${cols
+ .split(',')
+ .map(() => '?')
+ .join(',')})`
+ );
+ const ph = (hash, govHash) => [
+ uid,
+ 1,
+ 'de',
+ hash,
+ 'n',
+ 'u',
+ 'a',
+ 1,
+ 1,
+ 2,
+ 'submitted',
+ govHash,
+ now,
+ now,
+ ];
+ ins.run(...ph('a'.repeat(64), 'g'.repeat(64)));
+ expect(() => ins.run(...ph('b'.repeat(64), 'g'.repeat(64)))).toThrow(/UNIQUE/i);
+ db.close();
+ });
+
+ test('proposal_submissions.draft_id is ON DELETE SET NULL (history survives)', () => {
+ const db = openDatabase(':memory:');
+ const now = Date.now();
+ const r = db
+ .prepare(
+ 'INSERT INTO users (email, stored_auth, salt_v, created_at, updated_at) VALUES (?, ?, ?, ?, ?)'
+ )
+ .run('a@b.com', 'hash', FAKE_SALT_V, now, now);
+ const uid = r.lastInsertRowid;
+ const d = db
+ .prepare(
+ 'INSERT INTO proposal_drafts (user_id, created_at, updated_at) VALUES (?, ?, ?)'
+ )
+ .run(uid, now, now);
+ const did = d.lastInsertRowid;
+ db.prepare(
+ `INSERT INTO proposal_submissions (user_id, draft_id, time_unix, data_hex, proposal_hash, name, url, payment_address, payment_amount_sats, start_epoch, end_epoch, status, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
+ ).run(uid, did, 1, 'de', 'h'.repeat(64), 'n', 'u', 'a', 1, 1, 2, 'prepared', now, now);
+ db.prepare('DELETE FROM proposal_drafts WHERE id = ?').run(did);
+ const row = db
+ .prepare('SELECT draft_id FROM proposal_submissions WHERE user_id = ?')
+ .get(uid);
+ expect(row.draft_id).toBeNull();
+ db.close();
+ });
+
test('is idempotent: re-opening the same DB keeps data', () => {
const path = `:memory:`;
const db1 = openDatabase(path);
diff --git a/lib/mailer.js b/lib/mailer.js
index 30da8f4..7edca26 100644
--- a/lib/mailer.js
+++ b/lib/mailer.js
@@ -54,6 +54,10 @@ function renderTemplate(tpl, vars, { raw = [] } = {}) {
// - 'vote_reminder' opt-in notification, manage link visible
// - 'account_security' password change, etc; cannot disable
// - 'account_verification' registration email; no account yet
+// - 'proposal_notification' transactional update on a proposal the
+// user authored (submitted, failed); cannot
+// disable because the user directly initiated
+// the underlying action by paying collateral.
function buildFooter({ kind, to, accountUrl }) {
const safeTo = escapeHtml(String(to || ''));
@@ -112,6 +116,34 @@ ${attributionHtml}`;
return { text, html };
}
+ if (kind === 'proposal_notification') {
+ const text = [
+ '',
+ '— — —',
+ `This email was sent to ${to} because you authored a governance`,
+ 'proposal on Syscoin. We only email you about proposals you',
+ 'submitted yourself, and only for state changes you asked for by',
+ 'paying collateral. These transactional updates cannot be disabled.',
+ accountUrl ? `Your proposals: ${accountUrl}` : '',
+ '',
+ attributionText,
+ ]
+ .filter(Boolean)
+ .join('\n');
+ const reviewLink = accountUrl
+ ? ` View your proposals.`
+ : '';
+ const html = `${divider}
+
+ This email was sent to ${safeTo} because you authored
+ a governance proposal on Syscoin. We only email you about proposals
+ you submitted yourself, and only for state changes you asked for by
+ paying collateral. These transactional updates cannot be disabled.${reviewLink}
+
+${attributionHtml}`;
+ return { text, html };
+ }
+
if (kind === 'account_verification') {
const text = [
'',
@@ -170,6 +202,68 @@ const _builtInTemplates = {
v
),
},
+ // proposalSubmitted — sent by the dispatcher once a user's proposal
+ // reaches GOVERNANCE_FEE_CONFIRMATIONS (6) and gobject_submit
+ // returned the governance hash. This is the "it's live on chain"
+ // moment — surface the hash so the user can independently verify
+ // via any block explorer, and link them to the governance page
+ // where their proposal will show up for voting.
+ proposalSubmitted: {
+ subject: (v) => `Your Syscoin proposal "${v.proposalName}" is live`,
+ footerKind: 'proposal_notification',
+ text: (v) =>
+ `Hi,\n\nYour governance proposal "${v.proposalName}" has been accepted on chain and is now open for masternode voting.\n\nGovernance hash: ${v.governanceHash}\nCollateral txid: ${v.collateralTxid}\n\nView on Sysnode: ${v.proposalUrl}\n\n— Syscoin Sysnode`,
+ html: (v) =>
+ renderTemplate(
+ `Your governance proposal {{proposalName}} has been accepted on chain and is now open for masternode voting.
+
+ | Governance hash | {{governanceHash}} |
+ | Collateral txid | {{collateralTxid}} |
+
+View your proposal
`,
+ v,
+ { raw: ['proposalUrl'] }
+ ),
+ },
+
+ // proposalFailed — sent when the dispatcher gives up on a submission.
+ // Reasons we send from (see lib/proposalDispatcher.js):
+ // - collateral_not_found : tx never made it into any block within
+ // the 7-day window. User likely broadcast
+ // to the wrong network or the tx was
+ // double-spent.
+ // - submit_rejected : Core returned a terminal validation
+ // error on gobject_submit (payload
+ // changed, rate limited on the node, etc).
+ // The email includes the raw `failDetail` so the user has the same
+ // context the /submissions/:id page shows.
+ proposalFailed: {
+ subject: (v) => `Your Syscoin proposal "${v.proposalName}" could not be published`,
+ footerKind: 'proposal_notification',
+ text: (v) =>
+ `Hi,\n\nWe were unable to publish your governance proposal "${v.proposalName}" on chain.\n\nReason: ${v.failReason}\n${v.failDetail ? `Detail: ${v.failDetail}\n` : ''}\nThe 150 SYS collateral fee is burned by protocol regardless of whether a submission succeeds; we cannot recover it. You can review the failure and start fresh: ${v.proposalUrl}\n\n— Syscoin Sysnode`,
+ html: (v) =>
+ renderTemplate(
+ `We were unable to publish your governance proposal {{proposalName}} on chain.
+
+ | Reason | {{failReason}} |
+ {{detailRow}}
+
+The 150 SYS collateral fee is burned by protocol regardless of whether a submission succeeds; we cannot recover it.
+Review failure
`,
+ {
+ ...v,
+ // Pre-render the optional detail row so we don't need a
+ // templating-language conditional. renderTemplate itself
+ // escapes placeholder values; the row markup here is fixed.
+ detailRow: v.failDetail
+ ? `| Detail | ${escapeHtml(v.failDetail)} |
`
+ : '',
+ },
+ { raw: ['proposalUrl', 'detailRow'] }
+ ),
+ },
+
// voteReminder — generic copy (PR 7).
//
// The dispatcher sends one of two bucket flavors:
@@ -393,11 +487,62 @@ function createMailer(opts = {}) {
await deliver({ to, ...body });
}
+ // Build the canonical URL for a user's submission. Kept here (not in
+ // routes/govProposals.js) so both mail senders share one source of
+ // truth — the dispatcher doesn't know anything about URL shapes.
+ function proposalUrl(submissionId) {
+ return `${normalizedBase}/governance/proposals/${submissionId}`;
+ }
+
+ async function sendProposalSubmitted({
+ to,
+ proposalName,
+ governanceHash,
+ collateralTxid,
+ submissionId,
+ }) {
+ const accountUrl = `${normalizedBase}/account`;
+ const body = buildMessage(
+ 'proposalSubmitted',
+ {
+ proposalName: proposalName || '(unnamed)',
+ governanceHash,
+ collateralTxid,
+ proposalUrl: proposalUrl(submissionId),
+ },
+ { to, accountUrl }
+ );
+ await deliver({ to, ...body });
+ }
+
+ async function sendProposalFailed({
+ to,
+ proposalName,
+ failReason,
+ failDetail,
+ submissionId,
+ }) {
+ const accountUrl = `${normalizedBase}/account`;
+ const body = buildMessage(
+ 'proposalFailed',
+ {
+ proposalName: proposalName || '(unnamed)',
+ failReason: failReason || 'unknown',
+ failDetail: failDetail || '',
+ proposalUrl: proposalUrl(submissionId),
+ },
+ { to, accountUrl }
+ );
+ await deliver({ to, ...body });
+ }
+
return {
outbox,
sendVerification,
sendPasswordChanged,
sendVoteReminder,
+ sendProposalSubmitted,
+ sendProposalFailed,
};
}
diff --git a/lib/mailer.test.js b/lib/mailer.test.js
index 0593b67..943eb2e 100644
--- a/lib/mailer.test.js
+++ b/lib/mailer.test.js
@@ -134,7 +134,13 @@ describe('mailer.createMailer with memory transport', () => {
test('all built-in templates declared', () => {
expect(Object.keys(_builtInTemplates).sort()).toEqual(
- ['passwordChanged', 'verification', 'voteReminder'].sort()
+ [
+ 'passwordChanged',
+ 'proposalFailed',
+ 'proposalSubmitted',
+ 'verification',
+ 'voteReminder',
+ ].sort()
);
});
});
@@ -408,7 +414,7 @@ describe('mailer compliance footer on every sent message', () => {
// "git blame"-time rather than deploy-time.
for (const [name, tpl] of Object.entries(_builtInTemplates)) {
expect(tpl.footerKind).toMatch(
- /^(account_verification|account_security|vote_reminder)$/
+ /^(account_verification|account_security|vote_reminder|proposal_notification)$/
);
// And the template-name → footer-kind alignment is not accidental.
if (name === 'verification') {
@@ -417,7 +423,168 @@ describe('mailer compliance footer on every sent message', () => {
expect(tpl.footerKind).toBe('account_security');
} else if (name === 'voteReminder') {
expect(tpl.footerKind).toBe('vote_reminder');
+ } else if (
+ name === 'proposalSubmitted' ||
+ name === 'proposalFailed'
+ ) {
+ expect(tpl.footerKind).toBe('proposal_notification');
}
}
});
+
+ test('buildFooter(proposal_notification) mentions authorship + no-disable + review link', () => {
+ const f = buildFooter({
+ kind: 'proposal_notification',
+ to: 'alice@example.com',
+ accountUrl: 'https://sysnode.info/account',
+ });
+ // Transparency: why is the user getting this email?
+ expect(f.text).toMatch(/authored a governance\s+proposal/i);
+ expect(f.text).toMatch(/cannot be disabled/i);
+ expect(f.text).toContain('alice@example.com');
+ expect(f.text).toContain('https://sysnode.info/account');
+
+ expect(f.html).toMatch(/authored\s+a governance/i);
+ expect(f.html).toContain('alice@example.com');
+ expect(f.html).toContain('href="https://sysnode.info/account"');
+ });
+
+ test('buildFooter(proposal_notification) HTML-escapes the accountUrl', () => {
+ const f = buildFooter({
+ kind: 'proposal_notification',
+ to: 'evil"@example.com',
+ accountUrl: 'https://a.test/?x=1&y="2',
+ });
+    expect(f.html).toContain('evil&quot;@example.com');
+    expect(f.html).toContain('x=1&amp;y=&quot;2');
+ });
+
+ test('sendProposalSubmitted renders hash, txid, proposal name, and proposal URL', async () => {
+ const mailer = createMailer({
+ transport: 'memory',
+ from: 'no-reply@syscoin.dev',
+ publicBaseUrl: 'https://sysnode.info',
+ });
+ await mailer.sendProposalSubmitted({
+ to: 'author@example.com',
+ proposalName: 'fund-docs',
+ governanceHash: 'a'.repeat(64),
+ collateralTxid: 'b'.repeat(64),
+ submissionId: 42,
+ });
+ expect(mailer.outbox).toHaveLength(1);
+ const msg = mailer.outbox[0];
+ expect(msg.to).toBe('author@example.com');
+ expect(msg.subject).toMatch(/fund-docs/);
+ expect(msg.subject).toMatch(/is live/i);
+
+ // Text body surfaces the hash, txid, and a clickable URL.
+ expect(msg.text).toContain('a'.repeat(64));
+ expect(msg.text).toContain('b'.repeat(64));
+ expect(msg.text).toContain(
+ 'https://sysnode.info/governance/proposals/42'
+ );
+
+ // HTML body carries the same data + CTA, and the proposal_notification
+ // footer text/ link (proof the footer wiring reached the template).
+ expect(msg.html).toContain('a'.repeat(64));
+ expect(msg.html).toContain('b'.repeat(64));
+ expect(msg.html).toContain(
+ 'href="https://sysnode.info/governance/proposals/42"'
+ );
+ expect(msg.html).toMatch(/authored\s+a\s+governance/i);
+ });
+
+ test('sendProposalSubmitted HTML-escapes the proposal name', async () => {
+ const mailer = createMailer({
+ transport: 'memory',
+ from: 'no-reply@syscoin.dev',
+ publicBaseUrl: 'https://sysnode.info',
+ });
+ await mailer.sendProposalSubmitted({
+ to: 'author@example.com',
+    proposalName: '<script>alert(1)</script>',
+ governanceHash: 'c'.repeat(64),
+ collateralTxid: 'd'.repeat(64),
+ submissionId: 1,
+ });
+ const msg = mailer.outbox[0];
+    // Subject is plain text; script tag content may appear literally.
+    // HTML body must NEVER contain a live <script> tag; it must be escaped.
+    expect(msg.html).not.toContain('<script>alert(1)</script>');
+ });
+
+ test('sendProposalFailed surfaces reason, detail, and the burn disclosure', async () => {
+ const mailer = createMailer({
+ transport: 'memory',
+ from: 'no-reply@syscoin.dev',
+ publicBaseUrl: 'https://sysnode.info',
+ });
+ await mailer.sendProposalFailed({
+ to: 'author@example.com',
+ proposalName: 'fund-docs',
+ failReason: 'submit_rejected',
+ failDetail: 'governance object already exists',
+ submissionId: 7,
+ });
+ const msg = mailer.outbox[0];
+ expect(msg.subject).toMatch(/could not be published/i);
+ expect(msg.subject).toMatch(/fund-docs/);
+
+ // Both reason and detail are visible so the user can correlate
+ // with what /submissions/:id shows.
+ expect(msg.text).toContain('submit_rejected');
+ expect(msg.text).toContain('governance object already exists');
+
+ // Must be explicit about the 150 SYS burn — users should never
+ // learn about an unrefundable fee by surprise.
+ expect(msg.text).toMatch(/150 SYS.*burned/i);
+ expect(msg.html).toMatch(/150 SYS.*burned/i);
+
+ // CTA back to the status page.
+ expect(msg.text).toContain(
+ 'https://sysnode.info/governance/proposals/7'
+ );
+ expect(msg.html).toContain(
+ 'href="https://sysnode.info/governance/proposals/7"'
+ );
+ });
+
+ test('sendProposalFailed without detail omits the detail row', async () => {
+ const mailer = createMailer({
+ transport: 'memory',
+ from: 'no-reply@syscoin.dev',
+ publicBaseUrl: 'https://sysnode.info',
+ });
+ await mailer.sendProposalFailed({
+ to: 'author@example.com',
+ proposalName: 'x',
+ failReason: 'collateral_not_found',
+ submissionId: 1,
+ });
+ const msg = mailer.outbox[0];
+    expect(msg.html).not.toMatch(/<td[^>]*>Detail<\/td>/i);
+ expect(msg.text).not.toMatch(/\nDetail:/);
+ });
+
+ test('sendProposalFailed HTML-escapes the fail detail', async () => {
+ const mailer = createMailer({
+ transport: 'memory',
+ from: 'no-reply@syscoin.dev',
+ publicBaseUrl: 'https://sysnode.info',
+ });
+ await mailer.sendProposalFailed({
+ to: 'author@example.com',
+ proposalName: 'x',
+ failReason: 'submit_rejected',
+    failDetail: '<img src=x onerror=alert(1)>',
+ submissionId: 1,
+ });
+ const msg = mailer.outbox[0];
+    expect(msg.html).not.toContain('<img src=x onerror=alert(1)>');
+    expect(msg.html).toContain(
+      '&lt;img src=x onerror=alert(1)&gt;'
+    );
+ });
});
diff --git a/lib/proposalDispatcher.js b/lib/proposalDispatcher.js
new file mode 100644
index 0000000..68e4f84
--- /dev/null
+++ b/lib/proposalDispatcher.js
@@ -0,0 +1,527 @@
+'use strict';
+
+// proposal dispatcher — advances proposal_submissions rows whose
+// status is `awaiting_collateral` toward either `submitted` or
+// `failed`.
+//
+// --- Responsibilities -------------------------------------------------
+// 1. Poll the collateral tx's confirmation count.
+// 2. Write the latest confirmation count back to the row so the UI
+// can show live progress (X / 6 confirmations).
+// 3. Once >= GOVERNANCE_FEE_CONFIRMATIONS, call gObject_submit with
+// the frozen canonical fields (parent_hash, revision, time,
+// dataHex, collateral_txid). Record the returned governance hash
+// and flip status → submitted.
+// 4. Fail closed after a bounded wait if the tx never appears on
+// chain (user entered the wrong txid or double-spent the output).
+//
+// --- Idempotency ------------------------------------------------------
+// `gObject_submit` on Syscoin Core accepts re-submissions (it stores
+// the object keyed by hash; a duplicate submit either no-ops or
+// returns the same hash). So re-running tick() after a partial
+// failure is safe: in the worst case we pay the RPC round-trip again
+// and write the same governance_hash we already had. That said, we
+// still scope work to `awaiting_collateral` rows — `submitted` and
+// `failed` are terminal, so a successful tick cannot be re-run.
+//
+// --- Why separate from the email reminder dispatcher ----------------
+// The existing reminderDispatcher is driven by time/deadlines and
+// runs hourly. This dispatcher is driven by on-chain state and needs
+// a much tighter cadence (2 min) to feel responsive to the user
+// watching their proposal finalize. Mixing the two would either
+// waste RPC calls (hourly is too slow for confirmations) or waste
+// mailer calls (2-min cadence on reminders would be excessive), so
+// they stay separate.
+
+// Matches Syscoin Core's GOVERNANCE_FEE_CONFIRMATIONS constant in
+// src/governance/governanceobject.h. Do NOT tune without first
+// checking that Core still enforces the same threshold — being
+// stricter than Core is safe, laxer is not.
+const REQUIRED_CONFS = 6;
+
+const MS_MINUTE = 60 * 1000;
+const MS_HOUR = 60 * MS_MINUTE;
+const MS_DAY = 24 * MS_HOUR;
+
+// Default time after which a row that never gets a valid collateral
+// tx is marked `failed`. Seven days is a generous ceiling — users
+// have a weekend-plus-workdays window to fix a broken txid. Adjust
+// in the factory opts if operational experience suggests otherwise.
+const DEFAULT_TIMEOUT_MS = 7 * MS_DAY;
+
+// Core's error strings we want to match to decide retry-worthiness.
+// Any match counts as a permanent rejection (terminal → failed).
+//
+// NOTE: we deliberately do NOT include /already exists/ here. A duplicate-
+// submission error from Core is ambiguous: it can mean (a) we actually
+// succeeded on a previous tick / worker / process and crashed before
+// markSubmitted could persist, or (b) another actor posted the exact
+// same governance object independently. In either case the governance
+// object IS live on-chain, so flipping the row to `failed` (and firing
+// a failure email) is a worse outcome than leaving it for the follow-up
+// resolver to verify and promote to `submitted`. We classify it as a
+// transient error and log loudly so operators can reconcile offline.
+// (Codex PR8 round 1 P2.)
+//
+// Codex PR8 round 10 P2: these patterns MUST be anchored to
+// validation phrasings Syscoin Core actually produces for
+// unrecoverable governance-object rejections. A blanket /invalid/i
+// catch is far too broad — transient RPC transport / JSON-parser
+// errors (e.g. "invalid JSON-RPC response", "invalid response
+// from server", "Invalid URL", socket errors wrapped with words
+// like "invalid utf-8 sequence") frequently contain the word
+// "invalid", and misclassifying those as permanent rejections
+// flips rows to `failed` on a temporary outage and fires a failure
+// email the user can never fix. Narrow to the exact phrases Core
+// emits from CGovernanceObject::IsValidLocally / gobject_submit's
+// explicit error branches (`Governance object is not valid`,
+// `Invalid parent hash`, `Invalid signature`, `Invalid object
+// type`), plus `Object submission rejected` which is Core's
+// explicit permanent-reject wrapper, and the pre-validation hash
+// mismatches. Anything else — including bare "invalid" in a
+// transport string — stays classified as transient and gets
+// retried.
+const TERMINAL_CORE_ERRORS = [
+ // Syscoin Core's masternode governance-object rate limiter is a
+ // permanent reject: the object hash is burned for this cycle. The
+ // exact RPC phrase is the one thrown from gobject(submit) at
+ // syscoin/src/rpc/governance.cpp:204:
+ // "Object creation rate limit exceeded"
+ // We anchor to that wording instead of a bare `/rate limit/i` so
+ // transport-layer 429s from HTTP proxies / providers (which also
+ // tend to carry the words "rate limit"/"rate-limited" but are
+ // transient) are NOT misclassified as terminal. A proxy 429 is
+ // recoverable by retrying; Core's rate-limit reject is not.
+ /Object creation rate limit exceeded/i,
+ /Object submission rejected/i,
+ /Governance object is not valid/i,
+ /Invalid parent hash/i,
+ /Invalid (?:object )?signature/i,
+ /Invalid object type/i,
+ /Invalid proposal/i,
+ /Invalid data hex/i,
+ /hash mismatch/i,
+];
+
+const DUPLICATE_CORE_ERROR = /already exists/i;
+
+function isTerminalCoreError(err) {
+ const msg = String((err && (err.message || err.reason)) || '');
+ if (DUPLICATE_CORE_ERROR.test(msg)) return false;
+ return TERMINAL_CORE_ERRORS.some((re) => re.test(msg));
+}
+
+function isDuplicateCoreError(err) {
+ const msg = String((err && (err.message || err.reason)) || '');
+ return DUPLICATE_CORE_ERROR.test(msg);
+}
+
+function createProposalDispatcher({
+ submissions,
+ rpc,
+ log = () => {},
+ now = () => Date.now(),
+ opts = {},
+ // Best-effort callbacks fired after a state transition. These are
+ // the mailer hook: production wiring passes an onSubmitted that
+ // looks up the user and sends sendProposalSubmitted, and an
+ // onFailed that sends sendProposalFailed. Both receive the freshly
+ // re-read submission row so the callback never has to trust what
+ // the dispatcher "just wrote" (and, importantly, can never mutate
+ // dispatcher state by accident). A callback throw is logged and
+ // swallowed — the state transition itself already succeeded.
+ onSubmitted = null,
+ onFailed = null,
+} = {}) {
+ if (!submissions || typeof submissions.listByStatus !== 'function') {
+ throw new Error('submissions repo is required');
+ }
+ if (!rpc || typeof rpc.getRawTransaction !== 'function') {
+ throw new Error('rpc.getRawTransaction is required');
+ }
+ if (typeof rpc.gObjectSubmit !== 'function') {
+ throw new Error('rpc.gObjectSubmit is required');
+ }
+ if (onSubmitted !== null && typeof onSubmitted !== 'function') {
+ throw new Error('onSubmitted must be a function or null');
+ }
+ if (onFailed !== null && typeof onFailed !== 'function') {
+ throw new Error('onFailed must be a function or null');
+ }
+
+ const requiredConfs = Number.isInteger(opts.requiredConfs)
+ ? opts.requiredConfs
+ : REQUIRED_CONFS;
+ const timeoutMs = Number.isInteger(opts.timeoutMs)
+ ? opts.timeoutMs
+ : DEFAULT_TIMEOUT_MS;
+
+ // Swallow-and-log wrapper around the user-supplied hooks. We rely
+ // on the caller to eat their own errors in prod, but tolerating a
+ // crash here is strictly safer than letting a mailer blow up the
+ // dispatcher loop (which would then stop advancing every other
+ // row). Keep the hook call async-aware so async mailers work.
+ async function fireHook(name, fn, arg) {
+ if (!fn) return;
+ try {
+ await fn(arg);
+ } catch (err) {
+ log('error', 'hook_threw', {
+ hook: name,
+ id: arg && arg.submission && arg.submission.id,
+ msg: String(err && err.message),
+ });
+ }
+ }
+
+ // Processes ONE row. Broken out so the tick loop can isolate per-row
+ // failures — an error on row A must not stop us from handling row B.
+ async function advance(row) {
+ const { id, collateralTxid } = row;
+ if (!collateralTxid) {
+ // Shouldn't happen: attachCollateral enforces a non-null txid
+ // before status flips to awaiting_collateral. Defensive only.
+ log('warn', 'awaiting_collateral_without_txid', { id });
+ return;
+ }
+
+ // 1. Look up the tx. Core throws "No such mempool or blockchain
+ // transaction" if it's unknown. Don't treat "unknown" as a
+ // hard failure right away — the user may have JUST broadcast
+ // and their node hasn't seen it yet. Only fail after timeoutMs.
+ let tx;
+ try {
+ tx = await rpc.getRawTransaction(collateralTxid, 1);
+ } catch (err) {
+ const msg = String((err && err.message) || err);
+ log('debug', 'getRawTransaction_error', { id, txid: collateralTxid, msg });
+ const waitedMs = now() - row.updatedAt;
+ if (/No such mempool or blockchain/i.test(msg) && waitedMs > timeoutMs) {
+ let failedRow = null;
+ try {
+ failedRow = submissions.markFailed(id, {
+ reason: 'collateral_not_found',
+ detail: `Collateral tx ${collateralTxid} was not found after ${Math.round(
+ waitedMs / MS_HOUR
+ )}h.`,
+ });
+ log('warn', 'marked_failed_collateral_not_found', { id });
+ } catch (markErr) {
+ log('error', 'markFailed_threw', {
+ id,
+ msg: String(markErr && markErr.message),
+ });
+ }
+ if (failedRow) {
+ await fireHook('onFailed', onFailed, { submission: failedRow });
+ }
+ }
+ return;
+ }
+
+ // `confirmations` is absent when the tx is in mempool only; in
+ // some builds it may be 0. Normalize to a non-negative integer.
+ const confs =
+ Number.isFinite(Number(tx && tx.confirmations))
+ ? Math.max(0, Math.trunc(Number(tx.confirmations)))
+ : 0;
+
+ // 2. Write latest conf count unless it'd be a no-op.
+ if (confs !== row.collateralConfs) {
+ try {
+ submissions.updateConfirmations(id, confs);
+ } catch (err) {
+ log('error', 'updateConfirmations_failed', {
+ id,
+ msg: String(err && err.message),
+ });
+ return;
+ }
+ log('info', 'confs_updated', { id, confs });
+ }
+
+ if (confs < requiredConfs) {
+ return;
+ }
+
+ // 3. Submit. Build args from the frozen snapshot. If ANYTHING
+ // changes between prepare-time and now, the collateral OP_RETURN
+ // won't match and Core rejects us — which is exactly why the
+ // hashing fields are immutable on the repo.
+ let govHash;
+ try {
+ govHash = await rpc.gObjectSubmit(
+ row.parentHash,
+ row.revision,
+ row.timeUnix,
+ row.dataHex,
+ row.collateralTxid
+ );
+ } catch (err) {
+ // Terminal errors (validation, rate limit, "already exists")
+ // are not going to get better on retry — fail the row so the
+ // user gets a clear message instead of a silent retry loop.
+ if (isTerminalCoreError(err)) {
+ let failedRow = null;
+ try {
+ failedRow = submissions.markFailed(id, {
+ reason: 'submit_rejected',
+ detail: String((err && err.message) || err),
+ });
+ log('warn', 'submit_rejected', {
+ id,
+ msg: String((err && err.message) || err),
+ });
+ } catch (markErr) {
+ log('error', 'markFailed_after_submit_reject_threw', {
+ id,
+ msg: String(markErr && markErr.message),
+ });
+ }
+ if (failedRow) {
+ await fireHook('onFailed', onFailed, { submission: failedRow });
+ }
+ } else if (isDuplicateCoreError(err)) {
+ // "Governance object already exists" — the object is live on
+ // chain. Core indexes governance objects by CGovernanceObject::
+ // GetHash(parent, rev, time, vchData, outpoint, sig), which is
+ // exactly what computeProposalHash() reproduces at prepare
+ // time (outpoint + sig are empty for user-submitted top-level
+ // proposals). So the existing on-chain hash MUST equal our
+ // frozen row.proposalHash.
+ //
+ // Codex PR8 round 2 P1: previously we only logged here and
+ // left the row in awaiting_collateral, which meant a genuine
+ // duplicate (crash between gObjectSubmit succeeding and
+ // markSubmitted persisting, or two dispatcher workers racing)
+ // would loop forever with no terminal transition and no user
+ // signal. Resolve by promoting the row with our known hash.
+ log('warn', 'submit_already_exists', {
+ id,
+ msg: String((err && err.message) || err),
+ });
+ let submittedRow = null;
+ let clashed = false;
+ try {
+ submittedRow = submissions.markSubmitted(id, {
+ governanceHash: row.proposalHash,
+ });
+ } catch (markErr) {
+ // Two CAS failure modes to distinguish:
+ // - status_not_awaiting: another dispatcher worker
+ // already moved THIS row forward. The state machine
+ // already reflects the truth; don't loop, don't
+ // double-fire hooks.
+ // - governance_hash_clash: a DIFFERENT row already
+ // claimed this governance hash (the UNIQUE index on
+ // `governance_hash` rejected our UPDATE). The on-
+ // chain object exists but belongs to that other row
+ // on our books — ours is a redundant duplicate that
+ // will never be promoted. Codex PR8 round 10 P1:
+ // previously we only logged and exited here, so the
+ // row stayed in `awaiting_collateral` and every
+ // subsequent tick re-hit Core's "already exists"
+ // and looped forever with no terminal user-visible
+ // outcome. Mark it terminally failed with a stable
+ // reason so the user gets a clear signal and the
+ // dispatcher stops spinning on it.
+ clashed = markErr && markErr.code === 'governance_hash_clash';
+ log('warn', 'markSubmitted_after_duplicate_failed', {
+ id,
+ code: markErr && markErr.code,
+ msg: String((markErr && markErr.message) || markErr),
+ });
+ }
+ if (submittedRow) {
+ log('info', 'submitted_via_duplicate', {
+ id,
+ governanceHash: row.proposalHash,
+ });
+ await fireHook('onSubmitted', onSubmitted, {
+ submission: submittedRow,
+ });
+ } else if (clashed) {
+ // Terminal cleanup for the duplicate-hash case. Use
+ // markFailed's CAS to avoid racing with yet another
+ // dispatcher worker that might also be processing this
+ // row. If markFailed returns null we know someone else
+ // already transitioned the row to a terminal state.
+ let failedRow = null;
+ try {
+ failedRow = submissions.markFailed(id, {
+ reason: 'duplicate_governance_hash',
+ detail:
+ 'The on-chain governance object with this hash is ' +
+ 'already tracked by another submission row. ' +
+ 'Collateral has been consumed on-chain; the original ' +
+ 'row carries the live status.',
+ });
+ } catch (markErr) {
+ // status_terminal is the only expected throw — yet
+ // another worker won the race to terminal. Safe to
+ // ignore; no side effects owed by us.
+ log('warn', 'markFailed_after_hash_clash_threw', {
+ id,
+ code: markErr && markErr.code,
+ msg: String((markErr && markErr.message) || markErr),
+ });
+ }
+ if (failedRow) {
+ log('warn', 'failed_duplicate_governance_hash', { id });
+ await fireHook('onFailed', onFailed, { submission: failedRow });
+ }
+ }
+ // A null submittedRow + !clashed here means the CAS UPDATE
+ // matched zero rows — another dispatcher worker already
+ // promoted this row via the same duplicate path. No log
+ // is emitted because the competing branches above handle
+ // both failure modes (throws and CAS-miss null)
+ // symmetrically: neither fires onSubmitted, and a
+ // persistent race would surface through either the
+ // already-exists branch repeating or the winner's
+ // 'submitted' log. (Codex round 5 P1.)
+ } else {
+ // Transient (network, node restart). Leave the row as-is and
+ // retry next tick.
+ log('info', 'submit_transient_error', {
+ id,
+ msg: String((err && err.message) || err),
+ });
+ }
+ return;
+ }
+
+ // Core returns the hash as a 64-char hex string. Normalize and
+ // verify — an unexpected shape is a bug we'd rather loud-fail on
+ // than silently persist.
+ const hashStr =
+ typeof govHash === 'string'
+ ? govHash.toLowerCase().trim()
+ : '';
+ if (!/^[0-9a-f]{64}$/.test(hashStr)) {
+ log('error', 'gObject_submit_bad_response', {
+ id,
+ raw: typeof govHash === 'string' ? govHash : JSON.stringify(govHash),
+ });
+ return;
+ }
+
+ // 4. Flip status → submitted. Even this can race with a concurrent
+ // tick or an operator running the same dispatcher twice; the
+ // repo's compare-and-swap (Codex round 5 P1) turns the race
+ // into a silent null return — the losing worker must NOT log
+ // "submitted" or fire onSubmitted, because the winning worker
+ // already did. Treat null the same as a throw: observability
+ // only, no side effects.
+ let submittedRow = null;
+ let clashed = false;
+ let raceAlreadyLogged = false;
+ try {
+ submittedRow = submissions.markSubmitted(id, {
+ governanceHash: hashStr,
+ });
+ } catch (err) {
+ // status_not_awaiting: another dispatcher worker already moved
+ // this row forward. State machine reflects the truth; don't
+ // loop, don't double-fire hooks.
+ // governance_hash_clash: Core accepted our submit and returned
+ // a real hash, but a DIFFERENT row already claimed that
+ // governance hash on our books — so the object is live on-
+ // chain under the other row's identity. Ours is a redundant
+ // duplicate that will never be promoted to submitted from
+ // this path either. Must mirror the duplicate-error branch
+ // and transition the row to terminal `failed` with reason
+ // `duplicate_governance_hash`, or this tick-path leaves
+ // the row stuck in awaiting_collateral and every subsequent
+ // tick repeats the same submit → same hash → same clash
+ // forever with no user-visible terminal outcome.
+ clashed = err && err.code === 'governance_hash_clash';
+ log('warn', 'markSubmitted_raced', {
+ id,
+ code: err && err.code,
+ msg: String(err && err.message),
+ });
+ raceAlreadyLogged = true;
+ }
+ if (submittedRow) {
+ log('info', 'submitted', { id, governanceHash: hashStr });
+ await fireHook('onSubmitted', onSubmitted, { submission: submittedRow });
+ return;
+ }
+ if (clashed) {
+ let failedRow = null;
+ try {
+ failedRow = submissions.markFailed(id, {
+ reason: 'duplicate_governance_hash',
+ detail:
+ 'Core accepted the submission and returned a governance ' +
+ 'hash that is already tracked by another submission row. ' +
+ 'Collateral has been consumed on-chain; the original row ' +
+ 'carries the live status.',
+ });
+ } catch (markErr) {
+ log('error', 'markFailed_after_clash_failed', {
+ id,
+ msg: String(markErr && markErr.message),
+ });
+ }
+ if (failedRow) {
+ log('warn', 'failed_duplicate_governance_hash', { id });
+ await fireHook('onFailed', onFailed, { submission: failedRow });
+ }
+ return;
+ }
+ if (!raceAlreadyLogged) {
+ // CAS miss: another worker raced us and already promoted the
+ // row between our pre-read status check and the UPDATE.
+ // markSubmitted returned null instead of throwing. Log so the
+ // race is visible but do NOT emit another submitted-side
+ // effect — the winner already did.
+ log('warn', 'markSubmitted_raced', {
+ id,
+ code: 'cas_miss',
+ msg: 'row transitioned out of awaiting_collateral before UPDATE',
+ });
+ }
+ }
+
+ async function tick() {
+ let rows;
+ try {
+ rows = submissions.listByStatus('awaiting_collateral');
+ } catch (err) {
+ log('error', 'listByStatus_failed', {
+ msg: String(err && err.message),
+ });
+ return { advanced: 0, failed: 0 };
+ }
+ let advanced = 0;
+ let failed = 0;
+ for (const row of rows) {
+ const before = row.status;
+ try {
+ await advance(row);
+ } catch (err) {
+ failed += 1;
+ log('error', 'advance_threw', {
+ id: row.id,
+ msg: String(err && err.message),
+ });
+ continue;
+ }
+ // Re-read to count real transitions.
+ const after = submissions.getById(row.id);
+ if (after && after.status !== before) advanced += 1;
+ }
+ return { advanced, failed, scanned: rows.length };
+ }
+
+ return { tick, REQUIRED_CONFS: requiredConfs };
+}
+
+module.exports = {
+ createProposalDispatcher,
+ REQUIRED_CONFS,
+ DEFAULT_TIMEOUT_MS,
+ TERMINAL_CORE_ERRORS,
+};
diff --git a/lib/proposalDispatcher.test.js b/lib/proposalDispatcher.test.js
new file mode 100644
index 0000000..2a37bd3
--- /dev/null
+++ b/lib/proposalDispatcher.test.js
@@ -0,0 +1,747 @@
+'use strict';
+
+const { openDatabase } = require('./db');
+const { createProposalSubmissionsRepo } = require('./proposalSubmissions');
+const {
+ createProposalDispatcher,
+ REQUIRED_CONFS,
+ DEFAULT_TIMEOUT_MS,
+} = require('./proposalDispatcher');
+
+const FAKE_SALT_V = 'aa'.repeat(32);
+
+function seedUser(db, email = 'u@x.com') {
+ const t = Date.now();
+ const r = db
+ .prepare(
+ `INSERT INTO users (email, stored_auth, salt_v, created_at, updated_at)
+ VALUES (?, ?, ?, ?, ?)`
+ )
+ .run(email, 'h', FAKE_SALT_V, t, t);
+ return Number(r.lastInsertRowid);
+}
+
+function makeSubmission(repo, userId, overrides = {}) {
+ return repo.create({
+ userId,
+ parentHash: '0',
+ revision: 1,
+ timeUnix: 1800000000,
+ dataHex: '7b2274797065223a317d',
+ proposalHash: 'a'.repeat(64),
+ title: 'T',
+ name: 'name',
+ url: 'https://example.org/p',
+ paymentAddress: 'sys1qabcdefghij1234567890',
+ paymentAmountSats: 100n,
+ paymentCount: 1,
+ startEpoch: 1800000000,
+ endEpoch: 1802592000,
+ ...overrides,
+ });
+}
+
+// A minimal fake RPC that lets us script per-txid responses and
+// per-call behavior for gObjectSubmit. Keeps tests declarative.
+function makeFakeRpc() {
+ const txs = new Map(); // txid (lowercase) -> { confirmations } | error (Error instance)
+ const submitScript = []; // FIFO: each entry is { result } or { error }
+ const calls = { getRawTransaction: [], gObjectSubmit: [] };
+
+ return {
+ txs,
+ submitScript,
+ calls,
+ rpc: {
+ async getRawTransaction(txid /* , verbose */) {
+ calls.getRawTransaction.push(txid);
+ const entry = txs.get(txid.toLowerCase());
+ if (entry instanceof Error) throw entry;
+ if (!entry) {
+ const e = new Error('No such mempool or blockchain transaction');
+ throw e;
+ }
+ return entry;
+ },
+ async gObjectSubmit(parentHash, revision, time, dataHex, txid) {
+ calls.gObjectSubmit.push({ parentHash, revision, time, dataHex, txid });
+ if (submitScript.length === 0) {
+ throw new Error('fakeRpc: no submit script entries queued');
+ }
+ const next = submitScript.shift();
+ if (next.error) throw next.error;
+ return next.result;
+ },
+ },
+ };
+}
+
+function setup() {
+ const db = openDatabase(':memory:');
+ const userId = seedUser(db);
+ let clock = 1_700_000_000_000;
+ const repo = createProposalSubmissionsRepo(db, { now: () => clock });
+ const logs = [];
+ const fake = makeFakeRpc();
+ const dispatcher = createProposalDispatcher({
+ submissions: repo,
+ rpc: fake.rpc,
+ log: (level, event, meta) => logs.push({ level, event, meta }),
+ now: () => clock,
+ });
+ return {
+ db,
+ repo,
+ dispatcher,
+ fake,
+ logs,
+ userId,
+ tick: (ms) => {
+ clock += ms;
+ return clock;
+ },
+ };
+}
+
+// ---------------- confirmation tracking ----------------
+describe('proposalDispatcher — confirmation tracking', () => {
+ test('exports REQUIRED_CONFS = 6', () => {
+ expect(REQUIRED_CONFS).toBe(6);
+ });
+
+ test('records confs from RPC, does not submit before threshold', async () => {
+ const { repo, dispatcher, fake, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'f'.repeat(64));
+ fake.txs.set('f'.repeat(64), { confirmations: 3 });
+ const stats = await dispatcher.tick();
+ expect(stats.scanned).toBe(1);
+ const after = repo.getById(s.id);
+ expect(after.collateralConfs).toBe(3);
+ expect(after.status).toBe('awaiting_collateral');
+ expect(fake.calls.gObjectSubmit.length).toBe(0);
+ });
+
+ test('0 confirmations (mempool only) is tolerated', async () => {
+ const { repo, dispatcher, fake, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'f'.repeat(64));
+ // No `confirmations` field — mempool-only shape
+ fake.txs.set('f'.repeat(64), { txid: 'f'.repeat(64) });
+ await dispatcher.tick();
+ expect(repo.getById(s.id).collateralConfs).toBe(0);
+ expect(repo.getById(s.id).status).toBe('awaiting_collateral');
+ });
+
+ test('skipped rows that are prepared/submitted/failed', async () => {
+ const { repo, dispatcher, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ // Still in 'prepared' — dispatcher should not touch it
+ const stats = await dispatcher.tick();
+ expect(stats.scanned).toBe(0);
+ expect(repo.getById(s.id).status).toBe('prepared');
+ });
+});
+
+// ---------------- submit once mature ----------------
+describe('proposalDispatcher — submission', () => {
+ // Happy path: confirmed collateral plus a clean hex result from Core
+ // drives the row to terminal 'submitted' in a single tick.
+ test('submits when confs >= threshold and flips to submitted', async () => {
+ const { repo, dispatcher, fake, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({ result: 'b'.repeat(64) });
+
+ const stats = await dispatcher.tick();
+ expect(stats.advanced).toBe(1);
+ expect(fake.calls.gObjectSubmit).toHaveLength(1);
+ expect(fake.calls.gObjectSubmit[0]).toMatchObject({
+ parentHash: '0',
+ revision: 1,
+ time: 1800000000,
+ // hex encoding of '{"type":1}' — the serialized governance payload.
+ dataHex: '7b2274797065223a317d',
+ txid: 'a'.repeat(64),
+ });
+ const after = repo.getById(s.id);
+ expect(after.status).toBe('submitted');
+ expect(after.governanceHash).toBe('b'.repeat(64));
+ });
+
+ // Core may echo the hash in uppercase hex; the stored value must be
+ // normalized to lowercase so hash lookups stay canonical.
+ test('accepts governance hash in uppercase and lowercases it', async () => {
+ const { repo, dispatcher, fake, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 10 });
+ fake.submitScript.push({ result: 'B'.repeat(64) });
+ await dispatcher.tick();
+ expect(repo.getById(s.id).governanceHash).toBe('b'.repeat(64));
+ });
+
+ // A malformed (non-string) submit result must neither advance nor fail
+ // the row — it stays retryable, and the anomaly is logged for triage.
+ test('bad response shape: leaves row in awaiting_collateral and logs', async () => {
+ const { repo, dispatcher, fake, logs, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({ result: { not: 'a string' } });
+ await dispatcher.tick();
+ expect(repo.getById(s.id).status).toBe('awaiting_collateral');
+ expect(logs.find((l) => l.event === 'gObject_submit_bad_response')).toBeTruthy();
+ });
+
+ // A permanent Core validation rejection is terminal: the row flips to
+ // 'failed' and the rejection detail is preserved for the user.
+ test('terminal Core error → row marked failed', async () => {
+ const { repo, dispatcher, fake, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({
+ error: new Error(
+ 'Governance object is not valid - b...b - payment_address is invalid'
+ ),
+ });
+ await dispatcher.tick();
+ const after = repo.getById(s.id);
+ expect(after.status).toBe('failed');
+ expect(after.failReason).toBe('submit_rejected');
+ expect(after.failDetail).toMatch(/payment_address is invalid/);
+ });
+
+ test('"already exists" Core error → row promotes to submitted with frozen hash (no false failure, terminates loop)', async () => {
+ // Codex PR8 round 1 P2: duplicate-submit errors must NOT mark
+ // the row failed — the object is already live on chain.
+ //
+ // Codex PR8 round 2 P1: but leaving the row in
+ // awaiting_collateral forever (the first-round fix) is also wrong:
+ // if Core keeps returning "already exists", the row has no
+ // terminal transition and the user never gets a completion
+ // signal. Core indexes govobj by CGovernanceObject::GetHash(),
+ // which is exactly what computeProposalHash() reproduces at
+ // prepare time, so the on-chain hash == our frozen proposalHash.
+ // Promote using that hash.
+ const { repo, dispatcher, fake, userId, logs } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({
+ error: new Error(
+ "Governance object already exists in the node's object store"
+ ),
+ });
+ await dispatcher.tick();
+ const after = repo.getById(s.id);
+ // Promotion, not failure: terminal success, with the hash that was
+ // frozen at prepare time — no failReason left behind.
+ expect(after.status).toBe('submitted');
+ expect(after.failReason).toBeNull();
+ expect(after.governanceHash).toBe(s.proposalHash);
+ // Both the duplicate detection and the promotion leave audit events.
+ expect(logs.find((l) => l.event === 'submit_already_exists')).toBeTruthy();
+ expect(
+ logs.find((l) => l.event === 'submitted_via_duplicate')
+ ).toBeTruthy();
+ });
+
+ test(
+ '"already exists" where repo rejects markSubmitted (hash clash) → terminal failed with duplicate_governance_hash (Codex round 10 P1)',
+ async () => {
+ // Rare path: another row already claims this governance_hash
+ // (a concurrent dispatcher tick beat us; or — very unlikely
+ // — two users submitted identical canonical text at the same
+ // unix timestamp). The on-chain governance object exists but
+ // is tracked by the OTHER row on our books, and the UNIQUE
+ // index on governance_hash will keep rejecting our UPDATE
+ // forever.
+ //
+ // Round 9 behavior (left row in awaiting_collateral) caused
+ // the dispatcher to retry the same row every tick forever
+ // with no terminal user-visible outcome. Round 10 fix: flip
+ // to terminal `failed` with reason `duplicate_governance_hash`
+ // so the dispatcher stops spinning AND the user gets a
+ // final, actionable notification.
+ // Use the local setup() (which doesn't take hooks) and wire
+ // onFailed via a fresh dispatcher factory below, so we can
+ // observe the emitted event.
+ const { repo, fake, userId, logs } = setup();
+ // Capture onFailed invocations so the notification can be asserted.
+ const failedEvents = [];
+ const localDispatcher = createProposalDispatcher({
+ submissions: repo,
+ rpc: fake.rpc,
+ log: (level, event, meta) => logs.push({ level, event, meta }),
+ onFailed: (a) => failedEvents.push(a),
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+
+ // Pre-plant a different row that already owns this governance
+ // hash, so the UNIQUE index will fire on markSubmitted.
+ const s2 = makeSubmission(repo, userId, { proposalName: 'other' });
+ repo.attachCollateral(s2.id, userId, 'b'.repeat(64));
+ repo.markSubmitted(s2.id, { governanceHash: s.proposalHash });
+
+ fake.submitScript.push({
+ error: new Error(
+ "Governance object already exists in the node's object store"
+ ),
+ });
+ await localDispatcher.tick();
+ const after = repo.getById(s.id);
+ expect(after.status).toBe('failed');
+ expect(after.failReason).toBe('duplicate_governance_hash');
+ expect(
+ logs.find((l) => l.event === 'markSubmitted_after_duplicate_failed')
+ ).toBeTruthy();
+ expect(
+ logs.find((l) => l.event === 'failed_duplicate_governance_hash')
+ ).toBeTruthy();
+ // onFailed must fire so the user gets the notification.
+ expect(failedEvents).toHaveLength(1);
+ expect(failedEvents[0].submission.id).toBe(s.id);
+ expect(failedEvents[0].submission.failReason).toBe(
+ 'duplicate_governance_hash'
+ );
+
+ // And crucially: a second dispatcher tick does NOT retry this
+ // row (it's now terminal, so listByStatus('awaiting_collateral')
+ // won't see it).
+ fake.submitScript.push({
+ error: new Error(
+ "Governance object already exists in the node's object store"
+ ),
+ });
+ const submitsBefore = fake.calls.gObjectSubmit.length;
+ await localDispatcher.tick();
+ expect(fake.calls.gObjectSubmit.length).toBe(submitsBefore);
+ }
+ );
+
+ test(
+ 'SUCCESS gObject_submit with governance_hash_clash from repo → row marked failed duplicate_governance_hash (Codex round 13 P1)',
+ async () => {
+ // Companion to the round-10 duplicate-error path, but for the
+ // SUCCESS branch: Core returned a real hex hash (not an
+ // "already exists" error), yet our UNIQUE index on
+ // governance_hash rejects the UPDATE because another row
+ // already claims that hash. Pre-round-13 code only logged the
+ // clash and returned, leaving the row in awaiting_collateral
+ // forever. Round-13 fix: mirror the duplicate-error path and
+ // flip the row to terminal failed(duplicate_governance_hash)
+ // so the dispatcher stops spinning and the user sees a
+ // terminal outcome + onFailed notification.
+ const { repo, fake, userId, logs } = setup();
+ // Capture both hooks: exactly one onFailed, zero onSubmitted expected.
+ const failedEvents = [];
+ const submittedEvents = [];
+ const localDispatcher = createProposalDispatcher({
+ submissions: repo,
+ rpc: fake.rpc,
+ log: (level, event, meta) => logs.push({ level, event, meta }),
+ onFailed: (a) => failedEvents.push(a),
+ onSubmitted: (a) => submittedEvents.push(a),
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+
+ // Pre-plant a different row that already owns the hash Core
+ // will return below, forcing markSubmitted to throw
+ // governance_hash_clash from the UNIQUE index.
+ const clashHash = 'c'.repeat(64);
+ const s2 = makeSubmission(repo, userId, { proposalName: 'other' });
+ repo.attachCollateral(s2.id, userId, 'b'.repeat(64));
+ repo.markSubmitted(s2.id, { governanceHash: clashHash });
+
+ // Core accepts our submit and returns the (already-taken) hash.
+ fake.submitScript.push({ result: clashHash });
+ await localDispatcher.tick();
+
+ const after = repo.getById(s.id);
+ expect(after.status).toBe('failed');
+ expect(after.failReason).toBe('duplicate_governance_hash');
+ // No phantom onSubmitted — winning row already owns the hook.
+ expect(submittedEvents).toHaveLength(0);
+ // onFailed fires so the user gets a terminal notification.
+ expect(failedEvents).toHaveLength(1);
+ expect(failedEvents[0].submission.id).toBe(s.id);
+ expect(failedEvents[0].submission.failReason).toBe(
+ 'duplicate_governance_hash'
+ );
+ expect(
+ logs.find((l) => l.event === 'failed_duplicate_governance_hash')
+ ).toBeTruthy();
+
+ // Second tick must be a no-op — the row is terminal now and
+ // must not be retried.
+ const submitsBefore = fake.calls.gObjectSubmit.length;
+ await localDispatcher.tick();
+ expect(fake.calls.gObjectSubmit.length).toBe(submitsBefore);
+ }
+ );
+
+ // Transient transport errors must not change state — the row is picked
+ // up again on the next tick with no failure recorded.
+ test('transient Core error → row stays awaiting_collateral, will retry', async () => {
+ const { repo, dispatcher, fake, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({ error: new Error('ECONNREFUSED') });
+ await dispatcher.tick();
+ expect(repo.getById(s.id).status).toBe('awaiting_collateral');
+ });
+
+ // Idempotence across ticks: once submitted, a row must never be
+ // re-submitted.
+ test('two back-to-back ticks: first submits, second is a no-op', async () => {
+ const { repo, dispatcher, fake, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({ result: 'c'.repeat(64) });
+
+ await dispatcher.tick();
+ // Second tick — no new scripted submit response is queued; the rescan
+ // should find no rows in awaiting_collateral so gObjectSubmit isn't
+ // called again.
+ await dispatcher.tick();
+ expect(fake.calls.gObjectSubmit.length).toBe(1);
+ expect(repo.getById(s.id).status).toBe('submitted');
+ });
+
+ // Per-row error isolation: one row's transient failure must not stall
+ // the rest of the batch in the same tick.
+ test('multiple rows: error in one doesn\u2019t block the others', async () => {
+ const { repo, dispatcher, fake, userId } = setup();
+ // Different dataHex to avoid the partial unique index that
+ // guarantees a user can't have two `prepared` rows for the same
+ // canonical payload (Codex round 3 P2).
+ const a = makeSubmission(repo, userId, { proposalHash: 'a'.repeat(64) });
+ const b = makeSubmission(repo, userId, {
+ proposalHash: 'b'.repeat(64),
+ dataHex: '7b2274797065223a327d',
+ });
+ repo.attachCollateral(a.id, userId, '1'.repeat(64));
+ repo.attachCollateral(b.id, userId, '2'.repeat(64));
+ fake.txs.set('1'.repeat(64), { confirmations: 6 });
+ fake.txs.set('2'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({ error: new Error('ECONNREFUSED') }); // a transient
+ fake.submitScript.push({ result: 'd'.repeat(64) }); // b submits OK
+
+ await dispatcher.tick();
+ expect(repo.getById(a.id).status).toBe('awaiting_collateral');
+ expect(repo.getById(b.id).status).toBe('submitted');
+ });
+});
+
+// ---------------- missing collateral tx ----------------
+describe('proposalDispatcher — missing tx handling', () => {
+ // A txid the node can't find is normal right after broadcast — only a
+ // prolonged absence past the timeout window is treated as fatal.
+ test('tx not found (fresh row): row untouched, retries next tick', async () => {
+ const { repo, dispatcher, userId } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ // No tx in fake.txs → "No such mempool" thrown
+ await dispatcher.tick();
+ expect(repo.getById(s.id).status).toBe('awaiting_collateral');
+ });
+
+ // Past DEFAULT_TIMEOUT_MS with the tx still missing, the row becomes
+ // terminal with a collateral_not_found reason.
+ test('tx not found after timeout → marked failed', async () => {
+ const { repo, dispatcher, userId, tick } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ tick(DEFAULT_TIMEOUT_MS + 1);
+ await dispatcher.tick();
+ const after = repo.getById(s.id);
+ expect(after.status).toBe('failed');
+ expect(after.failReason).toBe('collateral_not_found');
+ });
+
+ // Only the specific "not found" condition may trip the timeout fail;
+ // other RPC failures keep the row retryable even after the window.
+ test('other RPC errors do NOT trigger timeout-based fail', async () => {
+ const { repo, dispatcher, fake, userId, tick } = setup();
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ // Storing an Error in fake.txs presumably makes getRawTransaction
+ // throw it — a non-"not found" RPC failure. (Fake defined elsewhere;
+ // confirm against makeFakeRpc.)
+ fake.txs.set('a'.repeat(64), new Error('Node overloaded, try again'));
+ tick(DEFAULT_TIMEOUT_MS + 1);
+ await dispatcher.tick();
+ expect(repo.getById(s.id).status).toBe('awaiting_collateral');
+ });
+});
+
+// ---------------- onSubmitted / onFailed hooks ----------------
+//
+// These are the seam the mailer hooks into. The dispatcher owns state
+// transitions; hooks ride along so the mailer never observes an
+// in-flight row. Hook semantics we verify:
+// 1. onSubmitted fires ONCE per real prepared → submitted transition
+// 2. onFailed fires on both failure paths (timeout + terminal Core err)
+// 3. Hook receives the freshly re-read row (status reflects transition)
+// 4. Neither hook fires when the transition doesn't happen
+// 5. Hook exceptions are swallowed and logged, never bubble up
+// 6. An async hook is awaited (rather than fire-and-forget)
+describe('proposalDispatcher — success/fail hooks', () => {
+ // Like the file-level setup(), but threads the optional
+ // onSubmitted/onFailed hooks into the dispatcher under test.
+ function setupWithHooks({ onSubmitted, onFailed } = {}) {
+ const db = openDatabase(':memory:');
+ const userId = seedUser(db);
+ let clock = 1_700_000_000_000;
+ const repo = createProposalSubmissionsRepo(db, { now: () => clock });
+ const logs = [];
+ const fake = makeFakeRpc();
+ const dispatcher = createProposalDispatcher({
+ submissions: repo,
+ rpc: fake.rpc,
+ log: (level, event, meta) => logs.push({ level, event, meta }),
+ now: () => clock,
+ onSubmitted,
+ onFailed,
+ });
+ return {
+ db,
+ repo,
+ dispatcher,
+ fake,
+ logs,
+ userId,
+ // Advance the injected clock by ms and return the new value.
+ tick: (ms) => {
+ clock += ms;
+ return clock;
+ },
+ };
+ }
+
+ test('onSubmitted fires with the submitted row, exactly once', async () => {
+ const events = [];
+ const { repo, dispatcher, fake, userId } = setupWithHooks({
+ onSubmitted: (arg) => {
+ events.push(arg);
+ },
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({ result: 'b'.repeat(64) });
+
+ await dispatcher.tick();
+
+ // Hook sees the freshly re-read row: status and hash already final.
+ expect(events).toHaveLength(1);
+ expect(events[0].submission.id).toBe(s.id);
+ expect(events[0].submission.status).toBe('submitted');
+ expect(events[0].submission.governanceHash).toBe('b'.repeat(64));
+
+ // Second tick: nothing to advance → hook must not re-fire.
+ await dispatcher.tick();
+ expect(events).toHaveLength(1);
+ });
+
+ test('onFailed fires on timeout (collateral_not_found)', async () => {
+ const events = [];
+ const { repo, dispatcher, userId, tick } = setupWithHooks({
+ onFailed: (arg) => {
+ events.push(arg);
+ },
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ tick(DEFAULT_TIMEOUT_MS + 1);
+ await dispatcher.tick();
+ expect(events).toHaveLength(1);
+ expect(events[0].submission.id).toBe(s.id);
+ expect(events[0].submission.status).toBe('failed');
+ expect(events[0].submission.failReason).toBe('collateral_not_found');
+ });
+
+ test('onFailed fires on terminal Core error (submit_rejected)', async () => {
+ const events = [];
+ const { repo, dispatcher, fake, userId } = setupWithHooks({
+ onFailed: (arg) => {
+ events.push(arg);
+ },
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({
+ error: new Error('Governance object is not valid - payment_address is invalid'),
+ });
+ await dispatcher.tick();
+ expect(events).toHaveLength(1);
+ expect(events[0].submission.status).toBe('failed');
+ expect(events[0].submission.failReason).toBe('submit_rejected');
+ expect(events[0].submission.failDetail).toMatch(/payment_address/);
+ });
+
+ test('hooks do not fire for non-transitions (transient error)', async () => {
+ const submittedEvents = [];
+ const failedEvents = [];
+ const { repo, dispatcher, fake, userId } = setupWithHooks({
+ onSubmitted: (a) => submittedEvents.push(a),
+ onFailed: (a) => failedEvents.push(a),
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({ error: new Error('ECONNREFUSED') });
+ await dispatcher.tick();
+ expect(submittedEvents).toHaveLength(0);
+ expect(failedEvents).toHaveLength(0);
+ // Row still awaiting — proves no failure-state transition happened.
+ expect(repo.getById(s.id).status).toBe('awaiting_collateral');
+ });
+
+ // Verifies the hook is awaited: `resolved` can only be true after the
+ // hook's internal timeout completes, so a fire-and-forget dispatcher
+ // would observe false here.
+ test('async hook is awaited', async () => {
+ let resolved = false;
+ const { repo, dispatcher, fake, userId } = setupWithHooks({
+ onSubmitted: async () => {
+ await new Promise((r) => setTimeout(r, 10));
+ resolved = true;
+ },
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({ result: 'c'.repeat(64) });
+ await dispatcher.tick();
+ expect(resolved).toBe(true);
+ });
+
+ // Hook failures are the mailer's problem, not the dispatcher's: the
+ // state transition still lands, and the exception is logged.
+ test('hook that throws is swallowed and logged', async () => {
+ const { repo, dispatcher, fake, logs, userId } = setupWithHooks({
+ onSubmitted: () => {
+ throw new Error('mailer fire');
+ },
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+ fake.submitScript.push({ result: 'e'.repeat(64) });
+ const stats = await dispatcher.tick();
+ expect(stats.advanced).toBe(1);
+ expect(repo.getById(s.id).status).toBe('submitted');
+ expect(logs.find((l) => l.event === 'hook_threw')).toBeTruthy();
+ });
+
+ test('rejects non-function / non-null onSubmitted', () => {
+ expect(() =>
+ createProposalDispatcher({
+ submissions: { listByStatus: () => [] },
+ rpc: { getRawTransaction: () => {}, gObjectSubmit: () => {} },
+ onSubmitted: 'nope',
+ })
+ ).toThrow(/onSubmitted/);
+ });
+
+ test(
+ 'transient RPC error containing the word "invalid" is NOT treated as terminal (Codex round 10 P2)',
+ async () => {
+ // Regression: the previous TERMINAL_CORE_ERRORS list had a
+ // blanket /invalid/i catch, so any transport / parser error
+ // whose message happened to contain the word "invalid" —
+ // very common in JSON-RPC client libraries ("invalid
+ // JSON-RPC response", "invalid response from server",
+ // "invalid utf-8 sequence", socket error wrappers, etc.) —
+ // got misclassified as a permanent Core rejection, flipped
+ // the row to `failed`, and fired a user-visible failure
+ // email the user could never actually fix.
+ //
+ // The fix narrows the list to phrases Syscoin Core actually
+ // emits for validation failures from gobject_submit /
+ // CGovernanceObject::IsValidLocally. Anything else — bare
+ // "invalid" in a transport error included — stays classified
+ // as transient and is retried on the next tick.
+ const failedEvents = [];
+ const { repo, dispatcher, fake, userId } = setupWithHooks({
+ onFailed: (a) => failedEvents.push(a),
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+
+ // Exactly the class of error Codex flagged: transport /
+ // parser failure wrapped with the word "invalid". Must NOT
+ // terminate the row.
+ fake.submitScript.push({
+ error: new Error('invalid JSON-RPC response from server'),
+ });
+ await dispatcher.tick();
+
+ expect(repo.getById(s.id).status).toBe('awaiting_collateral');
+ expect(failedEvents).toHaveLength(0);
+
+ // And the previously-terminal Core phrasings still terminate.
+ fake.submitScript.push({
+ error: new Error(
+ 'Governance object is not valid - payment_address is invalid'
+ ),
+ });
+ await dispatcher.tick();
+ expect(repo.getById(s.id).status).toBe('failed');
+ expect(repo.getById(s.id).failReason).toBe('submit_rejected');
+ expect(failedEvents).toHaveLength(1);
+ }
+ );
+
+ test(
+ 'transient proxy 429 "rate limit" is NOT terminal; only Core\'s exact "Object creation rate limit exceeded" is (Codex round 12 P1)',
+ async () => {
+ // Regression: the previous `/rate limit/i` pattern was too
+ // broad. HTTP proxies / RPC providers routinely return 429
+ // with bodies like "rate limit exceeded, try again later"
+ // or "rate-limited by upstream" — those are transient and
+ // must be retried. Only Core's exact permanent-reject
+ // phrase (thrown at syscoin/src/rpc/governance.cpp:204)
+ // "Object creation rate limit exceeded"
+ // is a terminal rate-limit condition; it means the object's
+ // governance hash is burned for this cycle and no retry
+ // will ever succeed.
+ const failedEvents = [];
+ const { repo, dispatcher, fake, userId } = setupWithHooks({
+ onFailed: (a) => failedEvents.push(a),
+ });
+ const s = makeSubmission(repo, userId);
+ repo.attachCollateral(s.id, userId, 'a'.repeat(64));
+ fake.txs.set('a'.repeat(64), { confirmations: 6 });
+
+ // Proxy 429 — transient. Row must stay in awaiting_collateral.
+ fake.submitScript.push({
+ error: new Error('429 Too Many Requests: rate limit exceeded'),
+ });
+ await dispatcher.tick();
+ expect(repo.getById(s.id).status).toBe('awaiting_collateral');
+ expect(failedEvents).toHaveLength(0);
+
+ // Core's exact phrase — terminal.
+ fake.submitScript.push({
+ error: new Error('Object creation rate limit exceeded'),
+ });
+ await dispatcher.tick();
+ expect(repo.getById(s.id).status).toBe('failed');
+ expect(repo.getById(s.id).failReason).toBe('submit_rejected');
+ expect(failedEvents).toHaveLength(1);
+ }
+ );
+
+ test('rejects non-function / non-null onFailed', () => {
+ expect(() =>
+ createProposalDispatcher({
+ submissions: { listByStatus: () => [] },
+ rpc: { getRawTransaction: () => {}, gObjectSubmit: () => {} },
+ onFailed: 42,
+ })
+ ).toThrow(/onFailed/);
+ });
+});
+
+// ---------------- factory arg validation ----------------
+describe('proposalDispatcher — factory validation', () => {
+ // Each required constructor dependency must fail fast with an error
+ // message naming the missing piece.
+ test('requires submissions repo', () => {
+ expect(() =>
+ createProposalDispatcher({
+ rpc: { getRawTransaction: () => {}, gObjectSubmit: () => {} },
+ })
+ ).toThrow(/submissions/);
+ });
+
+ test('requires rpc.getRawTransaction', () => {
+ expect(() =>
+ createProposalDispatcher({
+ submissions: { listByStatus: () => [] },
+ rpc: { gObjectSubmit: () => {} },
+ })
+ ).toThrow(/getRawTransaction/);
+ });
+
+ test('requires rpc.gObjectSubmit', () => {
+ expect(() =>
+ createProposalDispatcher({
+ submissions: { listByStatus: () => [] },
+ rpc: { getRawTransaction: () => {} },
+ })
+ ).toThrow(/gObjectSubmit/);
+ });
+});
diff --git a/lib/proposalDrafts.js b/lib/proposalDrafts.js
new file mode 100644
index 0000000..d96884f
--- /dev/null
+++ b/lib/proposalDrafts.js
@@ -0,0 +1,212 @@
+'use strict';
+
+// proposal_drafts repository.
+//
+// Drafts are plaintext, per-user, and meant to survive across devices
+// and sessions — the user taps "Save to drafts" on their phone and
+// opens the wizard again from their laptop with the content intact.
+// We store what the user typed, not the canonical form; canonicalization
+// happens at "prepare" time (see proposalValidate.canonicalize).
+//
+// BigInt handling: payment_amount_sats is returned as a JavaScript
+// BigInt so callers can't silently lose precision for large amounts.
+// On write, we accept number | bigint | decimal-string and coerce via
+// BigInt(). Callers that need to serialize to JSON convert with
+// .toString() since JSON.stringify can't handle BigInt natively.
+
+// Whitelist of patch keys accepted by update() (and read by create()).
+// Each entry is interpolated verbatim into "SET <key> = ?" in update(),
+// so the list doubles as an SQL-injection guard: only these literal
+// column names can ever reach the statement text.
+const VALID_PATCH_KEYS = [
+ 'title',
+ 'name',
+ 'url',
+ 'description',
+ 'payment_address',
+ 'payment_amount_sats',
+ 'payment_count',
+ 'start_epoch',
+ 'end_epoch',
+];
+
+// Coerce a caller-supplied amount (number | bigint | decimal-string |
+// null/undefined) to BigInt satoshis. null/undefined default to 0n;
+// anything else that isn't an integer-valued input throws.
+function toBigIntSats(v) {
+ if (v === null || v === undefined) return 0n;
+ if (typeof v === 'bigint') return v;
+ if (typeof v === 'number') {
+ // Reject NaN/Infinity and fractional values explicitly — BigInt()
+ // would also throw on fractions, but with a less helpful message.
+ if (!Number.isFinite(v) || !Number.isInteger(v)) {
+ throw new Error('payment_amount_sats must be an integer');
+ }
+ return BigInt(v);
+ }
+ if (typeof v === 'string') {
+ // NOTE(review): the pattern accepts an optional leading '-', so a
+ // negative amount passes here (and via number/bigint too) despite
+ // the "must be digits" wording — confirm whether negative sats
+ // should be rejected at this layer or at prepare/validate time.
+ if (!/^-?\d+$/.test(v)) {
+ throw new Error('payment_amount_sats string must be digits');
+ }
+ return BigInt(v);
+ }
+ throw new Error('payment_amount_sats must be number | bigint | string');
+}
+
+// Shape returned to callers. All integer columns are normalized:
+// ids / timestamps / payment_count → Number (fits easily in 2^53)
+// payment_amount_sats → BigInt (can legitimately exceed)
+// start/end epoch → Number | null
+// Map a raw proposal_drafts row to the caller-facing shape; returns
+// null for a missing row so repo getters can pass SQLite's undefined
+// straight through.
+function mapRow(row) {
+ if (!row) return null;
+ const amount = row.payment_amount_sats;
+ return {
+ id: Number(row.id),
+ userId: Number(row.user_id),
+ title: row.title,
+ name: row.name,
+ url: row.url,
+ description: row.description,
+ paymentAddress: row.payment_address,
+ // Coerce to BigInt defensively; better-sqlite3 may return Number or
+ // BigInt depending on whether .safeIntegers() is enabled.
+ paymentAmountSats:
+ typeof amount === 'bigint' ? amount : BigInt(amount ?? 0),
+ paymentCount: Number(row.payment_count),
+ // Epoch columns are nullable — preserve null rather than producing 0.
+ startEpoch: row.start_epoch == null ? null : Number(row.start_epoch),
+ endEpoch: row.end_epoch == null ? null : Number(row.end_epoch),
+ createdAt: Number(row.created_at),
+ updatedAt: Number(row.updated_at),
+ };
+}
+
+// Build the proposal_drafts repository over a better-sqlite3 handle.
+// opts.now lets tests inject a deterministic clock for created_at /
+// updated_at.
+function createProposalDraftsRepo(db, opts = {}) {
+ const now = opts.now ?? (() => Date.now());
+
+ // Enable BigInt mode on the prepared statements that touch
+ // payment_amount_sats, so reads preserve precision past 2^53.
+ const insert = db
+ .prepare(
+ `INSERT INTO proposal_drafts (
+ user_id, title, name, url, description,
+ payment_address, payment_amount_sats, payment_count,
+ start_epoch, end_epoch,
+ created_at, updated_at
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
+ )
+ .safeIntegers(true);
+
+ const byIdForUser = db
+ .prepare(
+ `SELECT * FROM proposal_drafts WHERE id = ? AND user_id = ?`
+ )
+ .safeIntegers(true);
+
+ const listForUserStmt = db
+ .prepare(
+ `SELECT * FROM proposal_drafts
+ WHERE user_id = ?
+ ORDER BY updated_at DESC, id DESC`
+ )
+ .safeIntegers(true);
+
+ // COUNT(*) fits in a Number, so no safeIntegers needed here.
+ const countForUserStmt = db.prepare(
+ `SELECT COUNT(*) AS c FROM proposal_drafts WHERE user_id = ?`
+ );
+
+ const deleteForUserStmt = db.prepare(
+ `DELETE FROM proposal_drafts WHERE id = ? AND user_id = ?`
+ );
+
+ // Insert a draft for userId, filling defaults for absent fields
+ // ('' strings, 0n amount, payment_count 1, null epochs). Returns the
+ // mapped row re-read from the database.
+ function create(userId, patch = {}) {
+ const t = now();
+ const p = patch || {};
+ const r = insert.run(
+ userId,
+ String(p.title ?? ''),
+ String(p.name ?? ''),
+ String(p.url ?? ''),
+ String(p.description ?? ''),
+ String(p.payment_address ?? ''),
+ toBigIntSats(p.payment_amount_sats),
+ Number.isFinite(Number(p.payment_count))
+ ? Math.trunc(Number(p.payment_count))
+ : 1,
+ p.start_epoch == null ? null : Math.trunc(Number(p.start_epoch)),
+ p.end_epoch == null ? null : Math.trunc(Number(p.end_epoch)),
+ t,
+ t
+ );
+ // With safeIntegers on the insert statement, lastInsertRowid is
+ // presumably a BigInt — binding it back as a parameter is fine.
+ return mapRow(byIdForUser.get(r.lastInsertRowid, userId));
+ }
+
+ // Owner-scoped fetch; null when the id is unknown or owned by someone
+ // else (callers can 404 without a separate existence check).
+ function getByIdForUser(id, userId) {
+ return mapRow(byIdForUser.get(id, userId));
+ }
+
+ // All of a user's drafts, most recently updated first (id DESC breaks
+ // same-timestamp ties).
+ function listForUser(userId) {
+ return listForUserStmt.all(userId).map(mapRow);
+ }
+
+ // Number of drafts owned by userId.
+ function countForUser(userId) {
+ const { c } = countForUserStmt.get(userId);
+ return Number(c);
+ }
+
+ // Partial update: only the fields present in the patch are touched.
+ // An update on a row the user does not own returns null (so callers
+ // can 404 without a separate existence check).
+ function update(id, userId, patch = {}) {
+ const existing = byIdForUser.get(id, userId);
+ if (!existing) return null;
+ const sets = [];
+ const values = [];
+ // Iterating the whitelist (not the patch) means unknown patch keys
+ // are silently ignored and only known column names reach the SQL.
+ for (const key of VALID_PATCH_KEYS) {
+ if (!(key in patch)) continue;
+ const val = patch[key];
+ if (key === 'payment_amount_sats') {
+ sets.push('payment_amount_sats = ?');
+ values.push(toBigIntSats(val));
+ } else if (key === 'payment_count') {
+ sets.push('payment_count = ?');
+ values.push(
+ Number.isFinite(Number(val)) ? Math.trunc(Number(val)) : 1
+ );
+ } else if (key === 'start_epoch' || key === 'end_epoch') {
+ sets.push(`${key} = ?`);
+ values.push(val == null ? null : Math.trunc(Number(val)));
+ } else {
+ // All other updatable columns are strings.
+ sets.push(`${key} = ?`);
+ values.push(String(val ?? ''));
+ }
+ }
+ if (sets.length === 0) {
+ // No-op update: still bump updated_at so the UI knows "touched"
+ // (e.g. if the user opened the draft and closed it without
+ // editing, we still want it on top). Use a dedicated statement
+ // since better-sqlite3 requires a non-empty set clause.
+ db.prepare(
+ `UPDATE proposal_drafts SET updated_at = ? WHERE id = ? AND user_id = ?`
+ ).run(now(), id, userId);
+ return mapRow(byIdForUser.get(id, userId));
+ }
+ sets.push('updated_at = ?');
+ values.push(now());
+ values.push(id, userId);
+ // NOTE(review): this re-prepares the UPDATE on every call (the SET
+ // clause varies with the patch, so a single cached statement can't
+ // cover all shapes) — fine for draft-edit frequency, but worth a
+ // small statement cache if this ever becomes hot.
+ db.prepare(
+ `UPDATE proposal_drafts SET ${sets.join(', ')} WHERE id = ? AND user_id = ?`
+ ).run(...values);
+ return mapRow(byIdForUser.get(id, userId));
+ }
+
+ // Owner-scoped delete; returns the number of rows removed (0 or 1).
+ function remove(id, userId) {
+ const info = deleteForUserStmt.run(id, userId);
+ return Number(info.changes);
+ }
+
+ return {
+ create,
+ getByIdForUser,
+ listForUser,
+ countForUser,
+ update,
+ remove,
+ };
+}
+
+// mapRow is exported alongside the factory — presumably for unit tests
+// or reuse by sibling repos; confirm before treating it as public API.
+module.exports = {
+ createProposalDraftsRepo,
+ mapRow,
+};
diff --git a/lib/proposalDrafts.test.js b/lib/proposalDrafts.test.js
new file mode 100644
index 0000000..02c99c9
--- /dev/null
+++ b/lib/proposalDrafts.test.js
@@ -0,0 +1,255 @@
+'use strict';
+
+const { openDatabase } = require('./db');
+const { createProposalDraftsRepo } = require('./proposalDrafts');
+
+// 64 hex characters — a plausible value for the users.salt_v column.
+const FAKE_SALT_V = 'aa'.repeat(32);
+
+// Insert a minimal users row (drafts have a user_id owner) and return
+// its numeric id.
+function seedUser(db, email = 'u@x.com') {
+ const t = Date.now();
+ const r = db
+ .prepare(
+ `INSERT INTO users (email, stored_auth, salt_v, created_at, updated_at)
+ VALUES (?, ?, ?, ?, ?)`
+ )
+ .run(email, 'hash', FAKE_SALT_V, t, t);
+ return Number(r.lastInsertRowid);
+}
+
+// Inject a now() we control so updated_at ordering is predictable.
+// Builds an in-memory database, two users (for ownership-isolation
+// tests), and the repo under test.
+function setup() {
+ const db = openDatabase(':memory:');
+ const user1 = seedUser(db, 'a@x.com');
+ const user2 = seedUser(db, 'b@x.com');
+ let clock = 1_700_000_000_000;
+ const repo = createProposalDraftsRepo(db, { now: () => clock });
+ return {
+ db,
+ repo,
+ user1,
+ user2,
+ // Advance the injected clock by ms (default 1s) and return it.
+ tick: (ms = 1000) => {
+ clock += ms;
+ return clock;
+ },
+ };
+}
+
+describe('proposalDrafts.create', () => {
+ // Defaults: '' for strings, 0n amount, payment_count 1, null epochs,
+ // and created_at === updated_at on a fresh row.
+ test('creates a draft with defaults when patch is empty', () => {
+ const { repo, user1 } = setup();
+ const d = repo.create(user1);
+ expect(d.id).toEqual(expect.any(Number));
+ expect(d.userId).toBe(user1);
+ expect(d.title).toBe('');
+ expect(d.name).toBe('');
+ expect(d.url).toBe('');
+ expect(d.description).toBe('');
+ expect(d.paymentAddress).toBe('');
+ expect(d.paymentAmountSats).toBe(0n);
+ expect(d.paymentCount).toBe(1);
+ expect(d.startEpoch).toBeNull();
+ expect(d.endEpoch).toBeNull();
+ expect(d.createdAt).toBe(d.updatedAt);
+ });
+
+ test('persists all provided fields', () => {
+ const { repo, user1 } = setup();
+ const d = repo.create(user1, {
+ title: 'My first proposal',
+ name: 'my-first',
+ url: 'https://ex.co/p',
+ description: 'pitch text',
+ payment_address: 'sys1qaaa',
+ payment_amount_sats: 4250000000n,
+ payment_count: 3,
+ start_epoch: 1800000000,
+ end_epoch: 1802592000,
+ });
+ expect(d.title).toBe('My first proposal');
+ expect(d.name).toBe('my-first');
+ expect(d.url).toBe('https://ex.co/p');
+ expect(d.description).toBe('pitch text');
+ expect(d.paymentAddress).toBe('sys1qaaa');
+ expect(d.paymentAmountSats).toBe(4250000000n);
+ expect(d.paymentCount).toBe(3);
+ expect(d.startEpoch).toBe(1800000000);
+ expect(d.endEpoch).toBe(1802592000);
+ });
+
+ // All three accepted input types normalize to the same BigInt.
+ test('accepts payment_amount_sats as number, bigint, or digit-string', () => {
+ const { repo, user1 } = setup();
+ const a = repo.create(user1, { payment_amount_sats: 100 });
+ const b = repo.create(user1, { payment_amount_sats: 100n });
+ const c = repo.create(user1, { payment_amount_sats: '100' });
+ expect(a.paymentAmountSats).toBe(100n);
+ expect(b.paymentAmountSats).toBe(100n);
+ expect(c.paymentAmountSats).toBe(100n);
+ });
+
+ // Round-trips through SQLite must not lose precision above 2^53 —
+ // this is what the safeIntegers(true) statements exist for.
+ test('preserves precision for BigInt values beyond 2^53', () => {
+ const { repo, user1 } = setup();
+ // 2^54 = 18014398509481984
+ const huge = 18014398509481984n;
+ const d = repo.create(user1, { payment_amount_sats: huge });
+ expect(d.paymentAmountSats).toBe(huge);
+ const fetched = repo.getByIdForUser(d.id, user1);
+ expect(fetched.paymentAmountSats).toBe(huge);
+ });
+
+ test('rejects non-integer number amounts', () => {
+ const { repo, user1 } = setup();
+ expect(() =>
+ repo.create(user1, { payment_amount_sats: 1.5 })
+ ).toThrow(/integer/);
+ });
+
+ test('rejects non-digit string amounts', () => {
+ const { repo, user1 } = setup();
+ expect(() =>
+ repo.create(user1, { payment_amount_sats: 'abc' })
+ ).toThrow(/digits/);
+ });
+});
+
+describe('proposalDrafts.getByIdForUser', () => {
+ test('returns the draft for its owner', () => {
+ const { repo, user1 } = setup();
+ const d = repo.create(user1, { title: 'owner' });
+ expect(repo.getByIdForUser(d.id, user1).title).toBe('owner');
+ });
+
+ // Ownership check: wrong user and unknown id are indistinguishable
+ // (both null), so routes can 404 without leaking existence.
+ test('returns null for a non-owner', () => {
+ const { repo, user1, user2 } = setup();
+ const d = repo.create(user1, { title: 'secret' });
+ expect(repo.getByIdForUser(d.id, user2)).toBeNull();
+ });
+
+ test('returns null for an unknown id', () => {
+ const { repo, user1 } = setup();
+ expect(repo.getByIdForUser(999999, user1)).toBeNull();
+ });
+});
+
+describe('proposalDrafts.listForUser', () => {
+ // Ordering is by updated_at DESC — the tick() between creates makes
+ // the second draft strictly newer.
+ test('lists user\u2019s drafts most-recent-first', () => {
+ const { repo, user1, tick } = setup();
+ const first = repo.create(user1, { title: 'first' });
+ tick(1000);
+ const second = repo.create(user1, { title: 'second' });
+ const list = repo.listForUser(user1);
+ expect(list.map((d) => d.id)).toEqual([second.id, first.id]);
+ });
+
+ test('isolates users', () => {
+ const { repo, user1, user2 } = setup();
+ repo.create(user1, { title: 'mine' });
+ repo.create(user2, { title: 'theirs' });
+ expect(repo.listForUser(user1).map((d) => d.title)).toEqual(['mine']);
+ expect(repo.listForUser(user2).map((d) => d.title)).toEqual(['theirs']);
+ });
+
+ test('returns [] for a user with no drafts', () => {
+ const { repo, user1 } = setup();
+ expect(repo.listForUser(user1)).toEqual([]);
+ });
+});
+
+describe('proposalDrafts.countForUser', () => {
+ test('counts only the caller\u2019s drafts', () => {
+ const { repo, user1, user2 } = setup();
+ repo.create(user1);
+ repo.create(user1);
+ repo.create(user2); // other user's row must not be counted
+ expect(repo.countForUser(user1)).toBe(2);
+ expect(repo.countForUser(user2)).toBe(1);
+ });
+});
+
+describe('proposalDrafts.update', () => {
+ test('applies partial updates and bumps updated_at', () => {
+ const { repo, user1, tick } = setup();
+ const d = repo.create(user1, { title: 'before', url: 'https://a' });
+ tick(2000); // ensure updated_at can visibly move forward
+ const updated = repo.update(d.id, user1, { title: 'after' });
+ expect(updated.title).toBe('after');
+ expect(updated.url).toBe('https://a'); // unchanged
+ expect(updated.updatedAt).toBeGreaterThan(d.updatedAt);
+ });
+
+ test('returns null when the user does not own the draft', () => {
+ const { repo, user1, user2 } = setup();
+ const d = repo.create(user1, { title: 'mine' });
+ expect(repo.update(d.id, user2, { title: 'hacked' })).toBeNull(); // null signals "not found for this user"
+ });
+
+ test('returns null when the id does not exist', () => {
+ const { repo, user1 } = setup();
+ expect(repo.update(9999, user1, { title: 'x' })).toBeNull();
+ });
+
+ test('empty patch is a no-op that still touches updated_at', () => {
+ const { repo, user1, tick } = setup();
+ const d = repo.create(user1, { title: 'x' });
+ tick(1000);
+ const after = repo.update(d.id, user1, {});
+ expect(after.updatedAt).toBeGreaterThan(d.updatedAt);
+ expect(after.title).toBe('x'); // fields untouched by the empty patch
+ });
+
+ test('can null-out start/end epochs', () => {
+ const { repo, user1 } = setup();
+ const d = repo.create(user1, {
+ start_epoch: 1800000000,
+ end_epoch: 1802592000,
+ });
+ const after = repo.update(d.id, user1, {
+ start_epoch: null, // explicit null = clear, distinct from "key absent"
+ end_epoch: null,
+ });
+ expect(after.startEpoch).toBeNull();
+ expect(after.endEpoch).toBeNull();
+ });
+
+ test('ignores unknown keys (defense in depth)', () => {
+ const { repo, user1 } = setup();
+ const d = repo.create(user1, { title: 'original' });
+ const after = repo.update(d.id, user1, {
+ title: 'updated',
+ not_a_field: 'should_be_ignored',
+ });
+ expect(after.title).toBe('updated');
+ });
+});
+
+describe('proposalDrafts.remove', () => {
+ test('removes the caller\u2019s draft and returns 1', () => {
+ const { repo, user1 } = setup();
+ const d = repo.create(user1);
+ expect(repo.remove(d.id, user1)).toBe(1); // 1 row removed
+ expect(repo.getByIdForUser(d.id, user1)).toBeNull();
+ });
+
+ test('returns 0 when the caller does not own the draft', () => {
+ const { repo, user1, user2 } = setup();
+ const d = repo.create(user1);
+ expect(repo.remove(d.id, user2)).toBe(0); // 0 rows — ownership enforced
+ // Draft still exists for the owner
+ expect(repo.getByIdForUser(d.id, user1)).not.toBeNull();
+ });
+
+ test('returns 0 for unknown id', () => {
+ const { repo, user1 } = setup();
+ expect(repo.remove(9999, user1)).toBe(0);
+ });
+});
+
+describe('proposalDrafts cascade on user delete', () => {
+ test('drafts vanish when their owner is deleted', () => {
+ const { db, repo, user1 } = setup();
+ repo.create(user1, { title: 't1' });
+ repo.create(user1, { title: 't2' });
+ db.prepare('DELETE FROM users WHERE id = ?').run(user1); // direct SQL delete; drafts must go via FK cascade
+ expect(repo.listForUser(user1)).toEqual([]);
+ });
+});
diff --git a/lib/proposalHash.js b/lib/proposalHash.js
new file mode 100644
index 0000000..713427d
--- /dev/null
+++ b/lib/proposalHash.js
@@ -0,0 +1,197 @@
+'use strict';
+
+// Proposal hash computation — JS port of Syscoin Core's
+// `CGovernanceObject::GetHash()`.
+//
+// This function MUST produce the same bytes that Core computes, because
+// the `OP_RETURN` output in the collateral transaction commits to those
+// exact 32 bytes. If we are off by one bit, `gobject_submit` will reject
+// our proposal with "collateral tx script not valid" — there is no
+// silent failure mode, which is the one thing we have going for us.
+//
+// Reference (syscoin v4.x, same as Dash): src/governance/governancecommon.cpp
+//
+// uint256 Object::GetHash() const {
+// CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
+// ss << hashParent; // uint256 -> 32 raw bytes
+// ss << revision; // int32_t -> 4 bytes LE
+// ss << time; // int64_t -> 8 bytes LE
+// ss << HexStr(vchData); // std::string -> CompactSize(len) + ASCII bytes
+// ss << masternodeOutpoint; // COutPoint -> 32 raw bytes + uint32 LE
+// ss << vchSig; // vector -> CompactSize(len) + bytes
+// return ss.GetHash(); // SHA256(SHA256(stream))
+// }
+//
+// For a user-submitted top-level proposal:
+// - hashParent = uint256() (all zeros)
+// - masternodeOutpoint = default COutPoint() = uint256(0) + uint32_t(-1)
+// (signals "not signed by a masternode")
+// - vchSig = empty
+//
+// --- uint256 byte-order note (read this before editing) ---
+//
+// Bitcoin/Syscoin `uint256` stores 32 bytes internally; `ToString()`
+// prints them in REVERSED hex ("big-endian display"), which is what
+// humans see in block explorers, RPC outputs, OP_RETURN hex in the raw
+// tx, and this app's UI. When those bytes are *serialized* into a
+// stream (as in `ss << hashParent`), they are written AS-IS — i.e.
+// little-endian relative to the display form.
+//
+// The final `GetHash()` result is also a `uint256`, so the same rule
+// applies in reverse: `ToByteVector(hash)` (which builds the OP_RETURN
+// payload) writes the internal LE bytes, while the 64-char hex string
+// users see is the bytes REVERSED.
+//
+// We therefore return BOTH forms:
+// - `displayHex` : the human hex you'd paste into a block explorer.
+// This matches what `gobject_submit` returns and
+// what `gobject_get <hash>` accepts.
+// - `opReturnBytes`: the 32 raw bytes to push after `OP_RETURN` in the
+// collateral transaction.
+//
+// End-to-end correctness is validated against a live syscoind on
+// staging/regtest before this reaches mainnet — see proposalHash.test.js
+// for the property tests and the commented-out integration harness.
+
+const crypto = require('crypto');
+
+const PARENT_HASH_ZERO_HEX = '0';
+const HEX64 = /^[0-9a-f]{64}$/;
+const HEX_ANY = /^[0-9a-f]*$/;
+
+// Bitcoin Core's WriteCompactSize (src/serialize.h):
+// n < 253 -> [n]
+// n <= 0xFFFF -> [0xFD, uint16 LE]
+// n <= 0xFFFFFFFF -> [0xFE, uint32 LE]
+// else -> [0xFF, uint64 LE]
+//
+// All governance fields we touch fit in the uint16 branch comfortably
+// (dataHex is bounded at 1024 ASCII chars by the 512-byte payload
+// limit, parent/sig/outpoint lengths are 0 or constant), but we
+// implement the full spec to keep the function reusable.
+function writeCompactSize(n) { // n: non-negative length → 1/3/5/9-byte Buffer
+ if (!Number.isInteger(n) || n < 0) {
+ throw new Error('writeCompactSize: n must be a non-negative integer');
+ }
+ if (n < 253) return Buffer.from([n]); // single-byte form
+ if (n <= 0xffff) {
+ const b = Buffer.alloc(3);
+ b[0] = 0xfd; // marker byte
+ b.writeUInt16LE(n, 1);
+ return b;
+ }
+ if (n <= 0xffffffff) {
+ const b = Buffer.alloc(5);
+ b[0] = 0xfe;
+ b.writeUInt32LE(n, 1);
+ return b;
+ }
+ const b = Buffer.alloc(9);
+ b[0] = 0xff;
+ b.writeBigUInt64LE(BigInt(n), 1); // BigInt required by the 64-bit write API
+ return b;
+}
+
+// parentHash comes in as either the literal "0" (common for top-level
+// proposals) or a 64-char hex string in display order. We return the
+// 32 raw bytes in internal LE order, ready for `ss << hashParent`.
+function normalizeParentHash(s) {
+ if (s == null || s === '' || s === PARENT_HASH_ZERO_HEX) {
+ return Buffer.alloc(32);
+ }
+ const h = String(s).toLowerCase();
+ if (!HEX64.test(h)) {
+ throw new Error('parentHash must be "0" or a 64-char hex string');
+ }
+ // Display -> internal: reverse the 32 bytes.
+ const buf = Buffer.from(h, 'hex');
+ return Buffer.from(buf).reverse();
+}
+
+// Core calls HexStr(vchData) before serializing, which lowercases the
+// hex. We accept case-insensitive input for callers but normalize to
+// lowercase before the string goes into the hash stream, otherwise
+// the same proposal bytes would hash to different values depending on
+// how the hex was typed.
+function normalizeDataHex(s) {
+ if (typeof s !== 'string') {
+ throw new Error('dataHex must be a string');
+ }
+ const h = s.toLowerCase();
+ if (!HEX_ANY.test(h)) {
+ throw new Error('dataHex must contain only hex characters');
+ }
+ if (h.length % 2 !== 0) {
+ throw new Error('dataHex must have even length');
+ }
+ return h; // lowercase, even-length hex (may be empty)
+}
+
+function computeProposalHash({ parentHash = PARENT_HASH_ZERO_HEX, revision, time, dataHex } = {}) { // → { displayHex, opReturnBytes }
+ if (!Number.isInteger(revision)) {
+ throw new Error('revision must be an integer');
+ }
+ if (!Number.isInteger(time) || time <= 0) {
+ throw new Error('time must be a positive integer (unix seconds)');
+ }
+
+ const parentBuf = normalizeParentHash(parentHash);
+ const hexLower = normalizeDataHex(dataHex);
+
+ const revBuf = Buffer.alloc(4);
+ revBuf.writeInt32LE(revision, 0); // `ss << revision` — int32 LE
+
+ const timeBuf = Buffer.alloc(8);
+ timeBuf.writeBigInt64LE(BigInt(time), 0); // `ss << time` — int64 LE
+
+ const hexBytes = Buffer.from(hexLower, 'ascii'); // Core hashes the hex STRING, not the decoded bytes
+
+ // Default COutPoint() serializes as 32 zero bytes (uint256 hash) +
+ // 4 bytes of 0xFF (uint32_t n = (uint32_t)-1 = 0xFFFFFFFF).
+ const outpointHash = Buffer.alloc(32);
+ const outpointN = Buffer.from([0xff, 0xff, 0xff, 0xff]);
+
+ const stream = Buffer.concat([
+ parentBuf,
+ revBuf,
+ timeBuf,
+ writeCompactSize(hexBytes.length), // std::string form: CompactSize(len) + ASCII
+ hexBytes,
+ outpointHash,
+ outpointN,
+ writeCompactSize(0), // empty vchSig
+ ]);
+
+ const h1 = crypto.createHash('sha256').update(stream).digest();
+ const h2 = crypto.createHash('sha256').update(h1).digest(); // double SHA-256 = CHashWriter.GetHash()
+
+ return {
+ displayHex: Buffer.from(h2).reverse().toString('hex'), // reversed → display order
+ opReturnBytes: Buffer.from(h2), // internal LE order, pushed after OP_RETURN
+ };
+}
+
+// Utility for callers that have already computed (or received) a
+// display-hex proposal hash and need the OP_RETURN bytes, or vice
+// versa. Keeps the byte-order conversion in one place.
+function displayHashToOpReturnBytes(displayHex) {
+ const h = String(displayHex).toLowerCase();
+ if (!HEX64.test(h)) {
+ throw new Error('displayHex must be 64 hex chars');
+ }
+ return Buffer.from(h, 'hex').reverse(); // fresh buffer from hex parse — in-place reverse is safe
+}
+
+function opReturnBytesToDisplayHash(buf) {
+ if (!Buffer.isBuffer(buf) || buf.length !== 32) {
+ throw new Error('opReturnBytes must be a 32-byte Buffer');
+ }
+ return Buffer.from(buf).reverse().toString('hex'); // copy first: reverse() mutates
+}
+
+module.exports = {
+ computeProposalHash,
+ writeCompactSize,
+ displayHashToOpReturnBytes,
+ opReturnBytesToDisplayHash,
+};
diff --git a/lib/proposalHash.test.js b/lib/proposalHash.test.js
new file mode 100644
index 0000000..33e321d
--- /dev/null
+++ b/lib/proposalHash.test.js
@@ -0,0 +1,263 @@
+'use strict';
+
+const {
+ computeProposalHash,
+ writeCompactSize,
+ displayHashToOpReturnBytes,
+ opReturnBytesToDisplayHash,
+} = require('./proposalHash');
+
+// The input -> output pair below is the anchor of this module. It was
+// computed with this implementation using the "flat" sysnode-* payload
+// shape and is the same vector used during the staging integration
+// test against a live syscoind (see PR 8 description). If this test
+// ever starts failing, do NOT update the expected value — the hash
+// format is consensus-frozen and any change here means the collateral
+// OP_RETURN would no longer match what Core expects, which is a ship-
+// stopping bug. Track it down first.
+const GOLDEN = Object.freeze({
+ input: {
+ parentHash: '0',
+ revision: 1,
+ time: 1700000123, // fixed so the vector never drifts
+ dataHex: Buffer.from(
+ JSON.stringify({
+ type: 1,
+ name: 'test-proposal',
+ start_epoch: 1700000000,
+ end_epoch: 1702592000,
+ payment_address: 'sys1q9h6mlnq2mwmlyyz4wa3q69lzq7h6mlsfqsp7mt',
+ payment_amount: 42.5,
+ url: 'https://example.org/p',
+ }),
+ 'utf8'
+ ).toString('hex'),
+ },
+ displayHex: 'f68f7f716fac9df8b994d5af316da6ca120a5285769673d796b1d5a73a3a208e',
+ opReturnHex: '8e203a3aa7d5b196d773967685520a12caa66d31afd594b9f89dac6f717f8ff6', // displayHex byte-reversed
+});
+
+describe('writeCompactSize', () => {
+ test('encodes n<253 as single byte', () => {
+ expect(writeCompactSize(0)).toEqual(Buffer.from([0x00]));
+ expect(writeCompactSize(1)).toEqual(Buffer.from([0x01]));
+ expect(writeCompactSize(252)).toEqual(Buffer.from([0xfc])); // 252 = last single-byte value
+ });
+
+ test('encodes 253..0xffff as 0xfd + uint16 LE', () => {
+ expect(writeCompactSize(253)).toEqual(Buffer.from([0xfd, 0xfd, 0x00])); // 253 = first 0xfd form
+ expect(writeCompactSize(0xffff)).toEqual(Buffer.from([0xfd, 0xff, 0xff]));
+ });
+
+ test('encodes 0x10000..0xffffffff as 0xfe + uint32 LE', () => {
+ expect(writeCompactSize(0x10000)).toEqual(
+ Buffer.from([0xfe, 0x00, 0x00, 0x01, 0x00])
+ );
+ });
+
+ test('encodes > 0xffffffff as 0xff + uint64 LE', () => {
+ expect(writeCompactSize(0x100000000)).toEqual(
+ Buffer.from([0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00])
+ );
+ });
+
+ test('rejects negative and non-integer inputs', () => {
+ expect(() => writeCompactSize(-1)).toThrow(/non-negative/);
+ expect(() => writeCompactSize(1.5)).toThrow(/non-negative/);
+ expect(() => writeCompactSize('0')).toThrow(/non-negative/); // strings rejected even if numeric-looking
+ });
+});
+
+describe('computeProposalHash — golden vector', () => {
+ test('matches the frozen reference output', () => {
+ const { displayHex, opReturnBytes } = computeProposalHash(GOLDEN.input); // the consensus anchor
+ expect(displayHex).toBe(GOLDEN.displayHex);
+ expect(opReturnBytes.toString('hex')).toBe(GOLDEN.opReturnHex);
+ });
+
+ test('opReturnBytes is the byte-reversal of displayHex', () => {
+ const { displayHex, opReturnBytes } = computeProposalHash(GOLDEN.input);
+ expect(Buffer.from(displayHex, 'hex').reverse().equals(opReturnBytes)).toBe(
+ true
+ );
+ });
+});
+
+describe('computeProposalHash — output shape', () => {
+ test('displayHex is 64 lowercase hex chars', () => {
+ const { displayHex } = computeProposalHash(GOLDEN.input);
+ expect(displayHex).toMatch(/^[0-9a-f]{64}$/);
+ });
+
+ test('opReturnBytes is a 32-byte Buffer', () => {
+ const { opReturnBytes } = computeProposalHash(GOLDEN.input);
+ expect(Buffer.isBuffer(opReturnBytes)).toBe(true);
+ expect(opReturnBytes.length).toBe(32);
+ });
+
+ test('is deterministic for identical input', () => {
+ const a = computeProposalHash(GOLDEN.input);
+ const b = computeProposalHash(GOLDEN.input); // identical input object → identical output
+ expect(a.displayHex).toBe(b.displayHex);
+ expect(a.opReturnBytes.equals(b.opReturnBytes)).toBe(true);
+ });
+});
+
+describe('computeProposalHash — field sensitivity', () => {
+ // Any change to any hashed field must produce a different hash,
+ // otherwise the hash function doesn't actually commit to those
+ // fields and the on-chain protocol is broken.
+ const base = GOLDEN.input;
+ const baseHash = computeProposalHash(base).displayHex; // evaluated once, when the describe body runs
+
+ test('changing revision changes the hash', () => {
+ const h = computeProposalHash({ ...base, revision: 2 }).displayHex;
+ expect(h).not.toBe(baseHash);
+ });
+
+ test('changing time changes the hash', () => {
+ const h = computeProposalHash({ ...base, time: base.time + 1 }).displayHex;
+ expect(h).not.toBe(baseHash);
+ });
+
+ test('changing one byte of dataHex changes the hash', () => {
+ const flipped =
+ base.dataHex.slice(0, -2) +
+ (base.dataHex.slice(-2) === 'ff' ? '00' : 'ff'); // flip the last byte
+ const h = computeProposalHash({ ...base, dataHex: flipped }).displayHex;
+ expect(h).not.toBe(baseHash);
+ });
+
+ test('changing parentHash (to non-zero) changes the hash', () => {
+ const h = computeProposalHash({
+ ...base,
+ parentHash:
+ 'a'.repeat(63) + '1', // valid 64-char hex, non-zero
+ }).displayHex;
+ expect(h).not.toBe(baseHash);
+ });
+
+ test('"0", null, undefined, and empty string all mean the zero uint256', () => {
+ const ref = computeProposalHash({ ...base, parentHash: '0' }).displayHex;
+ expect(computeProposalHash({ ...base, parentHash: null }).displayHex).toBe(
+ ref
+ );
+ expect(
+ computeProposalHash({ ...base, parentHash: undefined }).displayHex
+ ).toBe(ref);
+ expect(computeProposalHash({ ...base, parentHash: '' }).displayHex).toBe(
+ ref
+ );
+ // Also explicit 64 zero hex chars should match
+ expect(
+ computeProposalHash({ ...base, parentHash: '0'.repeat(64) }).displayHex
+ ).toBe(ref);
+ });
+});
+
+describe('computeProposalHash — normalization', () => {
+ test('dataHex is case-insensitive (Core lowercases via HexStr)', () => {
+ const lower = GOLDEN.input.dataHex;
+ const upper = lower.toUpperCase();
+ const mixed = lower.split('').map((c, i) => (i % 2 ? c.toUpperCase() : c)).join(''); // alternate character casing
+ const ref = computeProposalHash({ ...GOLDEN.input, dataHex: lower })
+ .displayHex;
+ expect(
+ computeProposalHash({ ...GOLDEN.input, dataHex: upper }).displayHex
+ ).toBe(ref);
+ expect(
+ computeProposalHash({ ...GOLDEN.input, dataHex: mixed }).displayHex
+ ).toBe(ref);
+ });
+
+ test('parentHash is case-insensitive', () => {
+ const lower = 'a'.repeat(64);
+ const upper = 'A'.repeat(64);
+ const a = computeProposalHash({ ...GOLDEN.input, parentHash: lower });
+ const b = computeProposalHash({ ...GOLDEN.input, parentHash: upper });
+ expect(a.displayHex).toBe(b.displayHex);
+ });
+
+ test('empty dataHex is accepted (valid Core input for empty proposals)', () => {
+ const { displayHex } = computeProposalHash({
+ parentHash: '0',
+ revision: 1,
+ time: 1, // minimal positive time
+ dataHex: '',
+ });
+ expect(displayHex).toMatch(/^[0-9a-f]{64}$/);
+ });
+});
+
+describe('computeProposalHash — input validation', () => {
+ const ok = GOLDEN.input;
+ test('revision must be an integer', () => {
+ expect(() => computeProposalHash({ ...ok, revision: 'x' })).toThrow(
+ /revision must be an integer/
+ );
+ expect(() => computeProposalHash({ ...ok, revision: 1.5 })).toThrow(
+ /revision must be an integer/
+ );
+ });
+
+ test('time must be positive integer', () => {
+ expect(() => computeProposalHash({ ...ok, time: 0 })).toThrow(
+ /time must be a positive integer/
+ );
+ expect(() => computeProposalHash({ ...ok, time: -1 })).toThrow(
+ /time must be a positive integer/
+ );
+ expect(() => computeProposalHash({ ...ok, time: 1.5 })).toThrow(
+ /time must be a positive integer/
+ );
+ });
+
+ test('dataHex must be valid hex', () => {
+ expect(() => computeProposalHash({ ...ok, dataHex: 'abc' })).toThrow( // odd length
+ /even length/
+ );
+ expect(() => computeProposalHash({ ...ok, dataHex: 'zz' })).toThrow(
+ /only hex/
+ );
+ expect(() => computeProposalHash({ ...ok, dataHex: 42 })).toThrow(
+ /must be a string/
+ );
+ });
+
+ test('parentHash must be "0" or 64 hex chars', () => {
+ expect(() =>
+ computeProposalHash({ ...ok, parentHash: 'abc' })
+ ).toThrow(/parentHash/);
+ expect(() =>
+ computeProposalHash({ ...ok, parentHash: 'z'.repeat(64) })
+ ).toThrow(/parentHash/);
+ });
+});
+
+describe('displayHashToOpReturnBytes / opReturnBytesToDisplayHash', () => {
+ test('round-trip is identity', () => {
+ const display = GOLDEN.displayHex;
+ const bytes = displayHashToOpReturnBytes(display); // display order → internal LE bytes
+ expect(opReturnBytesToDisplayHash(bytes)).toBe(display);
+ });
+
+ test('converts to/from the exact golden bytes', () => {
+ expect(
+ displayHashToOpReturnBytes(GOLDEN.displayHex).toString('hex')
+ ).toBe(GOLDEN.opReturnHex);
+ expect(
+ opReturnBytesToDisplayHash(Buffer.from(GOLDEN.opReturnHex, 'hex'))
+ ).toBe(GOLDEN.displayHex);
+ });
+
+ test('rejects non-hex / wrong-length inputs', () => {
+ expect(() => displayHashToOpReturnBytes('zz')).toThrow(/64 hex/);
+ expect(() => displayHashToOpReturnBytes('a'.repeat(63))).toThrow(/64 hex/);
+ expect(() => opReturnBytesToDisplayHash(Buffer.alloc(31))).toThrow(
+ /32-byte/
+ );
+ expect(() => opReturnBytesToDisplayHash('not a buffer')).toThrow(
+ /32-byte/
+ );
+ });
+});
diff --git a/lib/proposalRpc.js b/lib/proposalRpc.js
new file mode 100644
index 0000000..d42112e
--- /dev/null
+++ b/lib/proposalRpc.js
@@ -0,0 +1,76 @@
+'use strict';
+
+// Thin adapter that wraps the `@syscoin/syscoin-js` service surface
+// into the camelCase API the proposal dispatcher + prepare pre-flight
+// expect. Extracted from `server.js` so we can unit-test it directly
+// (otherwise the argument shape sent to syscoin-js / syscoind is only
+// exercised in integration, and a regression in types — e.g. passing
+// strings where syscoind expects numbers — silently ships).
+//
+// The factory takes a `rpcServices`-like function that returns the
+// object with `.gObject_submit(...)`, `.gObject_check(...)`,
+// `.getRawTransaction(...)` stubs you `.call()` to actually fire.
+// Every adapter method returns a Promise that resolves to the
+// parsed RPC result or rejects with the upstream Error.
+
+function createProposalRpc(rpcServicesFactory) {
+ if (typeof rpcServicesFactory !== 'function') {
+ throw new Error('createProposalRpc: rpcServicesFactory is required');
+ }
+
+ return {
+ async getRawTransaction(txid, verbose) { // verbose: JS boolean → 0/1 on the wire
+ return rpcServicesFactory()
+ .getRawTransaction(txid, verbose ? 1 : 0)
+ .call();
+ },
+
+ async gObjectSubmit(parentHash, revision, time, dataHex, feeTxid) {
+ // Codex PR8 round 16 P1: `gobject_submit` declares `revision`
+ // and `time` as `RPCArg::Type::NUM` in Syscoin Core (see
+ // syscoin/src/rpc/governance.cpp: CRPCCommand gobject_submit,
+ // params[1]=revision NUM, params[2]=time NUM). Syscoin Core
+ // enforces the JSON type via RPCTypeCheck at dispatch before
+ // `params[1].getInt()` runs. Earlier this adapter routed
+ // both args through `String(...)`, which serialized to a JSON
+ // string on the wire — Core rejected with an
+ // "Expected type number, got string" RPC_TYPE_ERROR. The
+ // dispatcher's terminal/transient classifier treated that as
+ // transient and left rows stuck in `awaiting_collateral`
+ // forever; no proposal ever transitioned to `submitted` or
+ // `failed` in production. The String() wrappers were a
+ // leftover from mirroring the syscoin-CLI shape (rpc/client.cpp
+ // has `{"gobject_submit", 1, "revision"}` conversion entries,
+ // but those rules are applied by the CLI *before* forwarding
+ // to the daemon — they do NOT apply to direct JSON-RPC callers
+ // like @syscoin/syscoin-js, which forwards JS types as-is).
+ // Pass the numeric JS values through so syscoin-js emits JSON
+ // numbers and Core accepts the call.
+ return rpcServicesFactory()
+ .gObject_submit(parentHash, revision, time, dataHex, feeTxid)
+ .call(true); // truthy arg = verbose call path on the syscoin-js helper
+ },
+
+ async gObjectCheck(dataHex) {
+ // Codex PR8 round 6 P1: Syscoin Core's `gobject_check` takes
+ // exactly ONE positional arg — `hex_data` — and derives
+ // parentHash, revision and nTime itself (see
+ // syscoin/src/rpc/governance.cpp::gobject_check, which calls
+ // CGovernanceObject govobj(uint256(), 1, GetAdjustedTime(),
+ // uint256(), strDataHex)
+ // ). Earlier iterations matched the 4-arg gobject_submit
+ // signature, which Core rejected with RPC_INVALID_PARAMS: too
+ // many positional arguments, and the route layer masqueraded
+ // that as 422 core_rejected on otherwise-valid proposals.
+ //
+ // gObject_check is read-only (no state mutation, no fee). The
+ // route layer swallows "Not Implemented" style errors so that
+ // older Core builds degrade silently to "skip pre-flight".
+ return rpcServicesFactory()
+ .gObject_check(dataHex)
+ .call();
+ },
+ };
+}
+
+module.exports = { createProposalRpc };
diff --git a/lib/proposalRpc.test.js b/lib/proposalRpc.test.js
new file mode 100644
index 0000000..9cb3637
--- /dev/null
+++ b/lib/proposalRpc.test.js
@@ -0,0 +1,152 @@
+'use strict';
+
+const { createProposalRpc } = require('./proposalRpc');
+
+// Recording fake that mimics the @syscoin/syscoin-js "stub" shape.
+// Each top-level method returns an object with `.call(verbose?)`
+// that resolves to a scripted value (or throws) — matching the
+// real RPCServiceFunctions surface. The factory records every
+// invocation so tests can assert on the exact argument types that
+// flow through to syscoind.
+function makeFakeRpcServices(scripts = {}) {
+ const calls = {
+ gObject_submit: [],
+ gObject_check: [],
+ getRawTransaction: [],
+ };
+ const factory = () => ({
+ gObject_submit(...args) {
+ calls.gObject_submit.push(args);
+ return {
+ call: async (verbose) => {
+ calls.gObject_submit[calls.gObject_submit.length - 1].verbose =
+ verbose; // arrays are objects — stash the verbose flag on the record
+ if (typeof scripts.gObject_submit === 'function') {
+ return scripts.gObject_submit(args, verbose);
+ }
+ return 'deadbeef'; // default scripted result
+ },
+ };
+ },
+ gObject_check(...args) {
+ calls.gObject_check.push(args);
+ return {
+ call: async () => {
+ if (typeof scripts.gObject_check === 'function') {
+ return scripts.gObject_check(args);
+ }
+ return { 'Object status': 'OK' };
+ },
+ };
+ },
+ getRawTransaction(...args) {
+ calls.getRawTransaction.push(args);
+ return {
+ call: async () => {
+ if (typeof scripts.getRawTransaction === 'function') {
+ return scripts.getRawTransaction(args);
+ }
+ return { confirmations: 6 };
+ },
+ };
+ },
+ });
+ return { factory, calls };
+}
+
+describe('createProposalRpc', () => {
+ test('throws when rpcServicesFactory is not a function', () => {
+ expect(() => createProposalRpc(null)).toThrow(/required/);
+ expect(() => createProposalRpc({})).toThrow(/required/);
+ expect(() => createProposalRpc(undefined)).toThrow(/required/);
+ });
+
+ // Codex PR8 round 16 P1: the adapter previously wrapped `revision`
+ // and `time` in `String(...)` before invoking syscoin-js, which
+ // forwards JS types to syscoind as-is. Core declares both as
+ // `RPCArg::Type::NUM` and enforces it at dispatch, so a stringified
+ // value produced an RPC_TYPE_ERROR. The dispatcher classified that
+ // as transient and rows stayed in `awaiting_collateral` forever.
+ // This is the regression guard.
+ test('gObjectSubmit forwards revision and time as numeric JS values, not strings', async () => {
+ const { factory, calls } = makeFakeRpcServices();
+ const rpc = createProposalRpc(factory);
+ const hash = await rpc.gObjectSubmit(
+ '0',
+ 1,
+ 1800000000,
+ '7b2274797065223a317d',
+ 'a'.repeat(64)
+ );
+ expect(hash).toBe('deadbeef');
+ expect(calls.gObject_submit).toHaveLength(1);
+ const args = calls.gObject_submit[0];
+ // Positional args: [parentHash, revision, time, dataHex, feeTxid].
+ expect(args[0]).toBe('0'); // parentHash — Core expects a hex string
+ // revision MUST be a JS number (or bigint). Anything that
+ // `typeof` reports as 'string' will be rejected by Core.
+ expect(typeof args[1]).toBe('number');
+ expect(args[1]).toBe(1);
+ expect(typeof args[2]).toBe('number');
+ expect(args[2]).toBe(1800000000);
+ expect(args[3]).toBe('7b2274797065223a317d');
+ expect(args[4]).toBe('a'.repeat(64));
+ // `.call(true)` was used (truthy verbose path).
+ expect(args.verbose).toBe(true);
+ });
+
+ test('gObjectSubmit does not coerce numeric inputs to strings in transit', async () => {
+ // Extra-paranoid guard: numeric inputs must reach the stub with
+ // their JS number type intact — no String(...) cast in transit
+ // that would break Core's NUM type contract.
+ const { factory, calls } = makeFakeRpcServices();
+ const rpc = createProposalRpc(factory);
+ await rpc.gObjectSubmit('0', 2, 1800000001, 'ab', 'cd');
+ const args = calls.gObject_submit[0];
+ expect(args[1]).toBe(2);
+ expect(args[2]).toBe(1800000001);
+ // Explicit: not strings.
+ expect(args[1]).not.toBe('2');
+ expect(args[2]).not.toBe('1800000001');
+ });
+
+ test('gObjectCheck forwards exactly one positional arg (hex_data)', async () => {
+ // Codex PR8 round 6 P1 guard: Core's gobject_check takes a
+ // single positional arg. Historically this adapter sent four
+ // and Core rejected with RPC_INVALID_PARAMS. Make sure we do
+ // not regress back to the 4-arg shape.
+ const { factory, calls } = makeFakeRpcServices();
+ const rpc = createProposalRpc(factory);
+ const res = await rpc.gObjectCheck('ab12');
+ expect(res).toEqual({ 'Object status': 'OK' });
+ expect(calls.gObject_check).toHaveLength(1);
+ expect(calls.gObject_check[0]).toHaveLength(1); // exactly one positional arg
+ expect(calls.gObject_check[0][0]).toBe('ab12');
+ });
+
+ test('getRawTransaction maps boolean verbose to 0/1', async () => {
+ // syscoin Core's getrawtransaction `verbose` param accepts 0
+ // or 1 (integer). The adapter translates the JS boolean to
+ // that wire form so callers can use a clean `true`/`false`
+ // API.
+ const { factory, calls } = makeFakeRpcServices();
+ const rpc = createProposalRpc(factory);
+ await rpc.getRawTransaction('a'.repeat(64), true);
+ await rpc.getRawTransaction('b'.repeat(64), false);
+ expect(calls.getRawTransaction).toHaveLength(2);
+ expect(calls.getRawTransaction[0]).toEqual(['a'.repeat(64), 1]);
+ expect(calls.getRawTransaction[1]).toEqual(['b'.repeat(64), 0]);
+ });
+
+ test('errors from syscoin-js bubble up unchanged', async () => {
+ const { factory } = makeFakeRpcServices({
+ gObject_submit: () => {
+ throw new Error('rpc-type-error: Expected type number, got string');
+ },
+ });
+ const rpc = createProposalRpc(factory);
+ await expect(
+ rpc.gObjectSubmit('0', 1, 1, 'ab', 'cd')
+ ).rejects.toThrow(/Expected type number/);
+ });
+});
diff --git a/lib/proposalSubmissions.js b/lib/proposalSubmissions.js
new file mode 100644
index 0000000..1f7bc6d
--- /dev/null
+++ b/lib/proposalSubmissions.js
@@ -0,0 +1,553 @@
+'use strict';
+
+// proposal_submissions repository.
+//
+// A submission row represents a proposal the user has committed to
+// publishing. The canonical hashing fields (parent_hash, revision,
+// time_unix, data_hex, proposal_hash) are FROZEN at create() time —
+// the repo exposes no API to mutate them afterwards because the
+// collateral OP_RETURN commits to proposal_hash; changing any
+// contributing field would de-couple the on-chain collateral from
+// the object we later submit, which is unrecoverable.
+//
+// State machine (see db/migrations/001_init.sql for prose):
+//
+// create() attachCollateral(txid)
+// │ │
+// ▼ ▼
+// ┌──────────┐ ┌─────────────────────┐
+// │ prepared │────────────▶│ awaiting_collateral │
+// └──────────┘ └─────────────────────┘
+// │ │ │
+// │ remove() │ │ markSubmitted({ governanceHash })
+// ▼ │ ▼
+// (deleted) │ ┌───────────┐
+// │ │ submitted │ (terminal)
+// │ └───────────┘
+// │ markFailed({ reason, detail })
+// ▼
+// ┌────────┐
+// │ failed │ (terminal)
+// └────────┘
+//
+// Transitions are enforced by the repo; a transition the state
+// machine does not allow raises an error with a stable `.code` string
+// the route layer can surface.
+
+const STATUS = Object.freeze({ // canonical status strings; frozen so a typo'd member access cannot be assigned
+  PREPARED: 'prepared',
+  AWAITING_COLLATERAL: 'awaiting_collateral',
+  SUBMITTED: 'submitted', // terminal
+  FAILED: 'failed', // terminal
+});
+
+const ALL_STATUSES = new Set(Object.values(STATUS)); // used by listByStatus() to reject unknown status strings
+
+function toBigIntSats(v) { // normalize number | bigint | digit-string to a BigInt satoshi amount; throws on lossy input
+  if (typeof v === 'bigint') return v;
+  if (typeof v === 'number') {
+    if (!Number.isFinite(v) || !Number.isInteger(v)) { // rejects NaN, Infinity, and fractional sats
+      throw new Error('payment_amount_sats must be an integer');
+    }
+    return BigInt(v);
+  }
+  if (typeof v === 'string' && /^-?\d+$/.test(v)) return BigInt(v); // sign allowed here; create() enforces > 0
+  throw new Error('payment_amount_sats must be number | bigint | digit-string');
+}
+
+function mapRow(row) { // DB row (snake_case, BigInt ints via safeIntegers) -> API object (camelCase); null-safe
+  if (!row) return null;
+  const amount = row.payment_amount_sats;
+  return {
+    id: Number(row.id), // ids fit in a double; safe to downcast
+    userId: Number(row.user_id),
+    draftId: row.draft_id == null ? null : Number(row.draft_id),
+
+    parentHash: row.parent_hash,
+    revision: Number(row.revision),
+    timeUnix: Number(row.time_unix),
+    dataHex: row.data_hex,
+    proposalHash: row.proposal_hash,
+
+    title: row.title,
+    name: row.name,
+    url: row.url,
+    paymentAddress: row.payment_address,
+    paymentAmountSats:
+      typeof amount === 'bigint' ? amount : BigInt(amount ?? 0), // kept as BigInt: sats can exceed 2^53
+    paymentCount: Number(row.payment_count),
+    startEpoch: Number(row.start_epoch),
+    endEpoch: Number(row.end_epoch),
+
+    status: row.status,
+    collateralTxid: row.collateral_txid,
+    collateralConfs: Number(row.collateral_confs),
+    governanceHash: row.governance_hash,
+    failReason: row.fail_reason,
+    failDetail: row.fail_detail,
+
+    createdAt: Number(row.created_at), // ms timestamps from the injected clock
+    updatedAt: Number(row.updated_at),
+  };
+}
+
+function err(code, message) { // Error carrying a machine-stable .code string the route layer maps to HTTP responses
+  const e = new Error(message);
+  e.code = code;
+  return e;
+}
+
+function requireString(name, v) { // non-empty-string guard; throws with code `<name>_required`
+  if (typeof v !== 'string' || v.length === 0) {
+    throw err(`${name}_required`, `${name} is required`);
+  }
+  return v; // returned for convenience; callers may chain
+}
+
+function createProposalSubmissionsRepo(db, opts = {}) { // db: better-sqlite3 handle; opts.now: injectable ms clock for tests
+  const now = opts.now ?? (() => Date.now());
+
+  const insert = db // statements are prepared once at construction and reused by every call
+    .prepare(
+      `INSERT INTO proposal_submissions (
+         user_id, draft_id,
+         parent_hash, revision, time_unix, data_hex, proposal_hash,
+         title, name, url, payment_address, payment_amount_sats,
+         payment_count, start_epoch, end_epoch,
+         status, collateral_txid, collateral_confs,
+         governance_hash, fail_reason, fail_detail,
+         created_at, updated_at
+       ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
+    )
+    .safeIntegers(true); // read INTEGER columns back as BigInt so satoshi amounts survive > 2^53
+
+  const byIdStmt = db
+    .prepare(`SELECT * FROM proposal_submissions WHERE id = ?`)
+    .safeIntegers(true);
+  const byIdForUserStmt = db
+    .prepare(
+      `SELECT * FROM proposal_submissions WHERE id = ? AND user_id = ?`
+    )
+    .safeIntegers(true);
+  const listForUserStmt = db
+    .prepare(
+      `SELECT * FROM proposal_submissions
+        WHERE user_id = ?
+        ORDER BY updated_at DESC, id DESC`
+    )
+    .safeIntegers(true);
+  const byStatusStmt = db
+    .prepare(
+      `SELECT * FROM proposal_submissions
+        WHERE status = ?
+        ORDER BY updated_at ASC, id ASC`
+    )
+    .safeIntegers(true); // oldest-updated first: dispatcher drains fairly
+  const byTxidStmt = db
+    .prepare(
+      `SELECT * FROM proposal_submissions WHERE collateral_txid = ?`
+    )
+    .safeIntegers(true);
+  const byGovHashStmt = db
+    .prepare(
+      `SELECT * FROM proposal_submissions WHERE governance_hash = ?`
+    )
+    .safeIntegers(true);
+  const byProposalHashForUserStmt = db
+    .prepare(
+      `SELECT * FROM proposal_submissions
+        WHERE user_id = ? AND proposal_hash = ?`
+    )
+    .safeIntegers(true);
+  // Codex PR8 round 2 P1: proposalHash bakes in `time` (seconds since
+  // epoch), so two retries of the same logical /prepare that cross a
+  // one-second boundary hash differently and bypass a hash-keyed
+  // idempotency check. Look up by the time-free canonical payload
+  // (data_hex) instead. Scoped to the user so two different users
+  // submitting coincidentally-identical text get independent rows.
+  const byPreparedDataHexForUserStmt = db
+    .prepare(
+      `SELECT * FROM proposal_submissions
+        WHERE user_id = ? AND data_hex = ? AND status = 'prepared'
+        ORDER BY created_at DESC, id DESC
+        LIMIT 1`
+    )
+    .safeIntegers(true);
+
+  const deleteForUserStmt = db.prepare( // remove(): only never-published rows are deletable
+    `DELETE FROM proposal_submissions
+      WHERE id = ? AND user_id = ?
+        AND status IN ('prepared', 'failed')`
+  );
+
+  function create(input) { // insert a new status='prepared' row; canonical hash fields are frozen from here on
+    const {
+      userId,
+      draftId = null,
+      parentHash = '0', // '0' = top-level governance object (no parent)
+      revision = 1,
+      timeUnix,
+      dataHex,
+      proposalHash,
+      title = '',
+      name,
+      url,
+      paymentAddress,
+      paymentAmountSats,
+      paymentCount = 1,
+      startEpoch,
+      endEpoch,
+    } = input || {};
+
+    if (!Number.isInteger(userId) || userId <= 0) {
+      throw err('user_required', 'userId is required');
+    }
+    if (!Number.isInteger(timeUnix) || timeUnix <= 0) {
+      throw err('time_required', 'timeUnix must be positive integer');
+    }
+    requireString('dataHex', dataHex);
+    if (!/^[0-9a-f]*$/.test(dataHex) || dataHex.length % 2 !== 0) { // lowercase hex only; uppercase would hash differently
+      throw err('data_hex_invalid', 'dataHex must be lowercase hex, even length');
+    }
+    requireString('proposalHash', proposalHash);
+    if (!/^[0-9a-f]{64}$/.test(proposalHash)) {
+      throw err('proposal_hash_invalid', 'proposalHash must be 64 lowercase hex chars');
+    }
+    requireString('name', name);
+    requireString('url', url);
+    requireString('paymentAddress', paymentAddress);
+    if (!Number.isInteger(startEpoch) || !Number.isInteger(endEpoch)) {
+      throw err('epoch_required', 'startEpoch/endEpoch must be integers');
+    }
+    const amt = toBigIntSats(paymentAmountSats);
+    if (amt <= 0n) throw err('amount_invalid', 'paymentAmountSats must be > 0');
+
+    const t = now();
+    const r = insert.run(
+      userId,
+      draftId,
+      parentHash,
+      revision,
+      timeUnix,
+      dataHex,
+      proposalHash,
+      title,
+      name,
+      url,
+      paymentAddress,
+      amt,
+      Math.trunc(Number(paymentCount)) || 1, // NOTE(review): non-numeric/0 silently coerces to 1, negatives pass through — confirm intended
+      startEpoch,
+      endEpoch,
+      STATUS.PREPARED,
+      null, // collateral_txid
+      0, // collateral_confs
+      null, // governance_hash
+      null, // fail_reason
+      null, // fail_detail
+      t,
+      t
+    );
+    return mapRow(byIdStmt.get(r.lastInsertRowid)); // re-read so the caller sees exactly what was persisted
+  }
+
+  function getById(id) { // unscoped lookup (dispatcher use)
+    return mapRow(byIdStmt.get(id));
+  }
+  function getByIdForUser(id, userId) { // owner-scoped lookup (route use); null for non-owners
+    return mapRow(byIdForUserStmt.get(id, userId));
+  }
+  function listForUser(userId) { // all of one user's rows, most recently updated first
+    return listForUserStmt.all(userId).map(mapRow);
+  }
+  function listByStatus(status) { // dispatcher work queue; validates against the known status set
+    if (!ALL_STATUSES.has(status)) {
+      throw err('status_invalid', `unknown status: ${status}`);
+    }
+    return byStatusStmt.all(status).map(mapRow);
+  }
+  function findByCollateralTxid(txid) { // null on falsy input rather than querying with null
+    if (!txid) return null;
+    return mapRow(byTxidStmt.get(txid));
+  }
+  function findByGovernanceHash(hash) {
+    if (!hash) return null;
+    return mapRow(byGovHashStmt.get(hash));
+  }
+  function findByProposalHashForUser(userId, hash) {
+    if (!hash) return null;
+    return mapRow(byProposalHashForUserStmt.get(userId, hash));
+  }
+  function findPreparedByDataHexForUser(userId, dataHex) { // time-free idempotency lookup (see stmt comment above)
+    if (!dataHex) return null;
+    return mapRow(byPreparedDataHexForUserStmt.get(userId, dataHex));
+  }
+
+  // prepared → awaiting_collateral
+  function attachCollateral(id, userId, txid) { // owner-scoped compare-and-swap; returns null for non-owner/missing row
+    if (typeof txid !== 'string' || !/^[0-9a-fA-F]{64}$/.test(txid)) {
+      throw err('txid_invalid', 'txid must be 64-char hex');
+    }
+    const txidLower = txid.toLowerCase(); // txids are stored lowercase; byTxidStmt lookups rely on it
+    const existing = byIdForUserStmt.get(id, userId);
+    if (!existing) return null;
+    if (existing.status !== STATUS.PREPARED) {
+      throw err(
+        'status_not_prepared',
+        `cannot attach collateral from status "${existing.status}"`
+      );
+    }
+    // Pre-check the txid to produce a friendly error rather than a
+    // raw SQLite UNIQUE message. This read-before-write catches the
+    // common case (same user clicks twice, different rows already
+    // wearing the txid) without needing to round-trip the DB error.
+    const clash = byTxidStmt.get(txidLower);
+    if (clash) {
+      throw err(
+        'txid_already_used',
+        'This transaction is already associated with another proposal.'
+      );
+    }
+    // Codex PR8 round 4 P1: the prior UPDATE only filtered on
+    // id/user_id, so the pre-read status check + write were NOT
+    // atomic across concurrent workers. Two racing /txid requests
+    // for the same row could both see status='prepared', both pass
+    // the clash check (if txids differ), and both UPDATE — with the
+    // second silently overwriting the first's collateral_txid. That
+    // violates the state machine and can bind the submission to the
+    // wrong collateral transaction. Fix: fold the expected old
+    // status into the WHERE clause as a compare-and-swap and reject
+    // on zero-changes.
+    let info;
+    try {
+      info = db.prepare(
+        `UPDATE proposal_submissions
+           SET status = ?, collateral_txid = ?, updated_at = ?
+         WHERE id = ? AND user_id = ? AND status = ?`
+      ).run(
+        STATUS.AWAITING_COLLATERAL,
+        txidLower,
+        now(),
+        id,
+        userId,
+        STATUS.PREPARED
+      );
+    } catch (e) {
+      // Codex PR8 round 3 P2: the read-above + write-here pair is
+      // not atomic. Two concurrent attach-collateral requests (same
+      // user, two different rows, same txid) can both pass the clash
+      // check and both hit this UPDATE — the partial unique index
+      // `idx_proposal_submissions_collateral_txid` will reject the
+      // second write with SQLITE_CONSTRAINT_UNIQUE. Normalize to the
+      // same stable `txid_already_used` code the pre-check raises
+      // so the route layer returns 409 consistently instead of
+      // bubbling up as a generic 500.
+      const raw = String((e && e.message) || e);
+      if (
+        (e && (e.code === 'SQLITE_CONSTRAINT_UNIQUE' ||
+               e.code === 'SQLITE_CONSTRAINT')) ||
+        /UNIQUE constraint failed/i.test(raw)
+      ) {
+        throw err(
+          'txid_already_used',
+          'This transaction is already associated with another proposal.'
+        );
+      }
+      throw e; // anything non-UNIQUE is a genuine failure; surface it untouched
+    }
+    if (Number(info.changes) === 0) {
+      // CAS miss: between our pre-read and this UPDATE, another
+      // actor (different worker or request) changed this row's
+      // status out of 'prepared'. Re-read to produce a precise,
+      // stable error rather than a silent overwrite.
+      const current = byIdForUserStmt.get(id, userId);
+      if (!current) return null; // row disappeared (remove())
+      if (current.status !== STATUS.PREPARED) {
+        throw err(
+          'status_not_prepared',
+          `cannot attach collateral from status "${current.status}"`
+        );
+      }
+      // Row is still prepared but UPDATE matched zero rows — should
+      // not happen. Surface as a retryable generic error so the
+      // client can retry instead of receiving a false-OK.
+      throw err(
+        'attach_cas_miss',
+        'Could not attach collateral due to a concurrent update; please retry.'
+      );
+    }
+    return mapRow(byIdStmt.get(id)); // fresh read: includes the updated_at the CAS just wrote
+  }
+
+  // Dispatcher: bump confs for a row the dispatcher already identified
+  // via listByStatus('awaiting_collateral'). No user scope.
+  function updateConfirmations(id, confs) { // unconditional write; returns the mapped row (or null if id unknown)
+    if (!Number.isInteger(confs) || confs < 0) {
+      throw err('confs_invalid', 'confs must be non-negative integer');
+    }
+    db.prepare(
+      `UPDATE proposal_submissions
+         SET collateral_confs = ?, updated_at = ?
+       WHERE id = ?`
+    ).run(confs, now(), id);
+    return mapRow(byIdStmt.get(id));
+  }
+
+  // awaiting_collateral → submitted (dispatcher only; no user scope)
+  function markSubmitted(id, { governanceHash } = {}) { // `= {}` default: markSubmitted(id) yields governance_hash_invalid, not a raw TypeError
+    if (!/^[0-9a-f]{64}$/i.test(governanceHash || '')) {
+      throw err(
+        'governance_hash_invalid',
+        'governanceHash must be 64 hex chars'
+      );
+    }
+    const row = byIdStmt.get(id);
+    if (!row) return null;
+    if (row.status !== STATUS.AWAITING_COLLATERAL) {
+      throw err(
+        'status_not_awaiting',
+        `cannot mark submitted from status "${row.status}"`
+      );
+    }
+    const gh = governanceHash.toLowerCase();
+    const clash = byGovHashStmt.get(gh);
+    if (clash && Number(clash.id) !== Number(id)) {
+      throw err(
+        'governance_hash_clash',
+        'governance_hash already recorded on another row'
+      );
+    }
+    // Codex PR8 round 5 P1: the read-above + write-here pair is not
+    // atomic. Two dispatcher workers processing the same row can
+    // each read `awaiting_collateral`, each pass the clash check
+    // (governance_hash identical), and each UPDATE — the second
+    // silently overwriting updated_at. More importantly, each
+    // worker then treats the transition as its *own* success and
+    // fires duplicate submitted-side effects (notably the
+    // "proposal submitted" email). Fold the expected old status
+    // into the WHERE clause as a compare-and-swap and treat zero
+    // changed rows as a raced no-op — the caller returns `null`
+    // and the winning worker keeps its role as the emit-once
+    // origin.
+    let info;
+    try {
+      info = db.prepare(
+        `UPDATE proposal_submissions
+           SET status = ?, governance_hash = ?, updated_at = ?
+         WHERE id = ? AND status = ?`
+      ).run(STATUS.SUBMITTED, gh, now(), id, STATUS.AWAITING_COLLATERAL);
+    } catch (e) {
+      // Codex PR8 round 11 P2: the read-above + write-here clash
+      // check is still NOT atomic — between the `byGovHashStmt.get`
+      // above and this UPDATE, another worker can insert/update
+      // another row to claim the same governance_hash. The partial
+      // UNIQUE index on `governance_hash` (a single governance hash
+      // may only belong to one submission row) will then reject
+      // this UPDATE with SQLITE_CONSTRAINT_UNIQUE. The dispatcher's
+      // duplicate-recovery path expects `governance_hash_clash` as
+      // the stable signal to transition the row to terminal failed;
+      // bubbling the raw SQLite error up instead leaves the
+      // dispatcher's `clashed` flag false and the row stuck in
+      // awaiting_collateral to be retried forever. Normalize here
+      // (same pattern used in attachCollateral for the collateral-
+      // txid unique clash) so the caller always sees one code.
+      const raw = String((e && e.message) || e);
+      if (
+        (e && (e.code === 'SQLITE_CONSTRAINT_UNIQUE' ||
+               e.code === 'SQLITE_CONSTRAINT')) ||
+        /UNIQUE constraint failed/i.test(raw)
+      ) {
+        throw err(
+          'governance_hash_clash',
+          'governance_hash already recorded on another row'
+        );
+      }
+      throw e;
+    }
+    if (Number(info.changes) === 0) {
+      // Raced: another worker flipped status out of
+      // `awaiting_collateral` between our pre-read and this UPDATE.
+      // Return null so the dispatcher treats this pass as a silent
+      // no-op rather than a successful submission.
+      return null;
+    }
+    return mapRow(byIdStmt.get(id));
+  }
+
+  // Any non-terminal state → failed. Terminal states are rejected so
+  // we don't accidentally overwrite a 'submitted' row with 'failed'
+  // due to a flaky secondary check.
+  function markFailed(id, { reason, detail } = {}) { // `= {}` default: markFailed(id) yields reason_required, not a raw TypeError
+    requireString('reason', reason);
+    const row = byIdStmt.get(id);
+    if (!row) return null;
+    if (row.status === STATUS.SUBMITTED || row.status === STATUS.FAILED) {
+      throw err(
+        'status_terminal',
+        `cannot mark failed from terminal status "${row.status}"`
+      );
+    }
+    // Codex PR8 round 7 P1: the pre-read above is not sufficient on
+    // its own in a multi-worker dispatcher setup. Between the
+    // `byIdStmt.get(id)` and this UPDATE another worker can flip
+    // the same row to `submitted` (via markSubmitted's own CAS) or
+    // to `failed` (via a sibling error path), and we would still
+    // stomp the row back to `failed` — corrupting the terminal
+    // state and potentially firing the wrong side effects (onFailed
+    // after an onSubmitted already went out). Gate the UPDATE on
+    // the row still being in a non-terminal status.
+    const info = db.prepare(
+      `UPDATE proposal_submissions
+         SET status = ?, fail_reason = ?, fail_detail = ?, updated_at = ?
+       WHERE id = ? AND status NOT IN (?, ?)`
+    ).run(
+      STATUS.FAILED,
+      reason,
+      detail == null ? null : String(detail),
+      now(),
+      id,
+      STATUS.SUBMITTED,
+      STATUS.FAILED
+    );
+    if (Number(info.changes) === 0) {
+      // Raced: another worker transitioned this row into a terminal
+      // state between our pre-read and the UPDATE. Return null so
+      // the dispatcher treats this pass as a silent no-op — matches
+      // the markSubmitted CAS-miss contract.
+      return null;
+    }
+    return mapRow(byIdStmt.get(id));
+  }
+
+  // Users can only delete rows that have NOT been published. This
+  // matches the partial DELETE statement above. Returns the number of
+  // rows affected.
+  function remove(id, userId) { // 0 means: no such row, not the owner, or status not in (prepared, failed)
+    const info = deleteForUserStmt.run(id, userId);
+    return Number(info.changes);
+  }
+
+  return { // public repo surface; STATUS re-exported so callers need not import the module constant
+    STATUS,
+    create,
+    getById,
+    getByIdForUser,
+    listForUser,
+    listByStatus,
+    findByCollateralTxid,
+    findByGovernanceHash,
+    findByProposalHashForUser,
+    findPreparedByDataHexForUser,
+    attachCollateral,
+    updateConfirmations,
+    markSubmitted,
+    markFailed,
+    remove,
+  };
+}
+
+module.exports = { // STATUS and mapRow exported alongside the factory — presumably for tests/dispatcher reuse; confirm against callers
+  createProposalSubmissionsRepo,
+  STATUS,
+  mapRow,
+};
diff --git a/lib/proposalSubmissions.test.js b/lib/proposalSubmissions.test.js
new file mode 100644
index 0000000..7fdc4f2
--- /dev/null
+++ b/lib/proposalSubmissions.test.js
@@ -0,0 +1,818 @@
+'use strict';
+
+const { openDatabase } = require('./db');
+const {
+ createProposalSubmissionsRepo,
+ STATUS,
+} = require('./proposalSubmissions');
+
+const FAKE_SALT_V = 'aa'.repeat(32);
+
+function seedUser(db, email = 'u@x.com') { // insert a minimal users row; returns its id (FK target for submissions)
+  const t = Date.now();
+  const r = db
+    .prepare(
+      `INSERT INTO users (email, stored_auth, salt_v, created_at, updated_at)
+       VALUES (?, ?, ?, ?, ?)`
+    )
+    .run(email, 'h', FAKE_SALT_V, t, t); // 'h'/FAKE_SALT_V: placeholder auth material, never verified by these tests
+  return Number(r.lastInsertRowid);
+}
+
+function validInput(userId, overrides = {}) { // canonical valid create() payload; overrides patch individual fields
+  return {
+    userId,
+    parentHash: '0',
+    revision: 1,
+    timeUnix: 1800000000,
+    dataHex: '7b2274797065223a317d', // {"type":1}
+    proposalHash: 'a'.repeat(64),
+    title: 'Test',
+    name: 'test-proposal',
+    url: 'https://example.org/p',
+    paymentAddress: 'sys1qabcdefghij1234567890',
+    paymentAmountSats: 4250000000n, // BigInt on purpose: exercises the bigint path in toBigIntSats
+    paymentCount: 1,
+    startEpoch: 1800000000,
+    endEpoch: 1802592000,
+    ...overrides,
+  };
+}
+
+// Custom matcher-esque helper: assert that calling `fn` throws an Error
+// whose .code matches the given string. We check the machine-stable
+// `.code` (not the human message), since that's what the route layer
+// maps to HTTP status / user copy.
+function expectThrowsCode(fn, expectedCode) {
+  try {
+    fn();
+  } catch (e) {
+    expect(e.code).toBe(expectedCode); // a wrong code fails here inside the catch
+    return;
+  }
+  throw new Error(`expected throw with code ${expectedCode}, got nothing`); // reached only if fn did not throw
+}
+
+function setup() { // fresh in-memory DB + repo per test; deterministic clock advanced via tick()
+  const db = openDatabase(':memory:');
+  const user1 = seedUser(db, 'a@x.com');
+  const user2 = seedUser(db, 'b@x.com');
+  let clock = 1_700_000_000_000;
+  const repo = createProposalSubmissionsRepo(db, { now: () => clock }); // repo reads the closed-over clock
+  return {
+    db,
+    repo,
+    user1,
+    user2,
+    tick: (ms = 1000) => { // advance the injected clock so updated_at moves between writes
+      clock += ms;
+      return clock;
+    },
+  };
+}
+
+// ---------------- create ----------------
+describe('create', () => {
+  test('creates a row in status=prepared with all canonical fields', () => {
+    const { repo, user1 } = setup();
+    const s = repo.create(validInput(user1));
+    expect(s.status).toBe(STATUS.PREPARED);
+    expect(s.userId).toBe(user1);
+    expect(s.proposalHash).toBe('a'.repeat(64));
+    expect(s.paymentAmountSats).toBe(4250000000n);
+    expect(s.collateralTxid).toBeNull(); // collateral fields start empty
+    expect(s.collateralConfs).toBe(0);
+    expect(s.governanceHash).toBeNull();
+  });
+
+  test('preserves BigInt amount above 2^53', () => {
+    const { repo, user1 } = setup();
+    const huge = 18014398509481985n; // 2^54 + 1: unrepresentable as a double
+    const s = repo.create(validInput(user1, { paymentAmountSats: huge }));
+    expect(s.paymentAmountSats).toBe(huge);
+    expect(repo.getById(s.id).paymentAmountSats).toBe(huge); // survives a DB round-trip too
+  });
+
+  test.each([
+    ['userId', { userId: 0 }, 'user_required'],
+    ['timeUnix', { timeUnix: 0 }, 'time_required'],
+    ['dataHex', { dataHex: 'not hex' }, 'data_hex_invalid'],
+    ['proposalHash', { proposalHash: 'short' }, 'proposal_hash_invalid'],
+    ['name', { name: '' }, 'name_required'],
+    ['url', { url: '' }, 'url_required'],
+    ['paymentAddress', { paymentAddress: '' }, 'paymentAddress_required'],
+    ['paymentAmountSats (zero)', { paymentAmountSats: 0n }, 'amount_invalid'],
+    ['startEpoch missing', { startEpoch: null }, 'epoch_required'],
+  ])('rejects invalid input: %s', (_label, patch, code) => {
+    const { repo, user1 } = setup();
+    expectThrowsCode(() => repo.create(validInput(user1, patch)), code);
+  });
+});
+
+// ---------------- user isolation ----------------
+describe('user isolation', () => {
+  test('getByIdForUser returns null for a non-owner', () => {
+    const { repo, user1, user2 } = setup();
+    const s = repo.create(validInput(user1));
+    expect(repo.getByIdForUser(s.id, user2)).toBeNull(); // null, not an error: non-owner cannot probe existence
+  });
+
+  test('listForUser scopes to owner', () => {
+    const { repo, user1, user2 } = setup();
+    repo.create(validInput(user1, { proposalHash: 'a'.repeat(64) }));
+    repo.create(validInput(user2, { proposalHash: 'b'.repeat(64) }));
+    expect(repo.listForUser(user1).map((r) => r.proposalHash)).toEqual([
+      'a'.repeat(64),
+    ]);
+    expect(repo.listForUser(user2).map((r) => r.proposalHash)).toEqual([
+      'b'.repeat(64),
+    ]);
+  });
+});
+
+// ---------------- attachCollateral ----------------
+describe('attachCollateral', () => {
+  const txid = 'f'.repeat(64);
+
+  test('prepared → awaiting_collateral and stores lowercase txid', () => {
+    const { repo, user1 } = setup();
+    const s = repo.create(validInput(user1));
+    const out = repo.attachCollateral(s.id, user1, txid.toUpperCase()); // uppercase in, lowercase stored
+    expect(out.status).toBe(STATUS.AWAITING_COLLATERAL);
+    expect(out.collateralTxid).toBe(txid);
+  });
+
+  test('returns null for a non-owner', () => {
+    const { repo, user1, user2 } = setup();
+    const s = repo.create(validInput(user1));
+    expect(repo.attachCollateral(s.id, user2, txid)).toBeNull();
+  });
+
+  test('rejects from non-prepared status', () => {
+    const { repo, user1 } = setup();
+    const s = repo.create(validInput(user1));
+    repo.attachCollateral(s.id, user1, txid); // first attach moves it out of 'prepared'
+    expectThrowsCode(
+      () => repo.attachCollateral(s.id, user1, txid),
+      'status_not_prepared'
+    );
+  });
+
+  test('rejects an invalid txid', () => {
+    const { repo, user1 } = setup();
+    const s = repo.create(validInput(user1));
+    expectThrowsCode(
+      () => repo.attachCollateral(s.id, user1, 'not-hex'),
+      'txid_invalid'
+    );
+  });
+
+  test('rejects a txid already used by another submission', () => {
+    const { repo, user1, user2 } = setup();
+    const a = repo.create(validInput(user1));
+    const b = repo.create(
+      validInput(user2, { proposalHash: 'b'.repeat(64) })
+    );
+    repo.attachCollateral(a.id, user1, txid);
+    expectThrowsCode(
+      () => repo.attachCollateral(b.id, user2, txid),
+      'txid_already_used'
+    );
+  });
+
+  test(
+    'normalizes write-level unique constraint races to txid_already_used (Codex round 3 P2)',
+    () => {
+      // Scenario: two concurrent attach-collateral requests for the
+      // same txid both pass the read-before-write pre-check and both
+      // issue an UPDATE. In-process we can't truly interleave
+      // better-sqlite3 calls, so we simulate the second request's
+      // race by planting the clashing row BETWEEN the pre-check and
+      // the UPDATE via a direct SQL write bypass. The partial unique
+      // index `idx_proposal_submissions_collateral_txid` must then
+      // reject the UPDATE, and the repo must translate that raw
+      // SQLite error into the same stable `.code = txid_already_used`
+      // the pre-check raises — so the route layer keeps returning a
+      // clean 409 regardless of which branch fired.
+      const { db, repo, user1, user2 } = setup();
+      const a = repo.create(validInput(user1));
+      const b = repo.create(
+        validInput(user2, { proposalHash: 'b'.repeat(64) })
+      );
+      // Monkey-patch byTxidStmt indirectly: there's no hook, so
+      // exercise the equivalent behavior by writing a competing row
+      // under the radar of the pre-check. We do this by racing the
+      // UPDATE via a second prepared statement that the repo will
+      // not observe until AFTER its pre-read has already returned
+      // null. Because better-sqlite3 is synchronous, we achieve the
+      // same effect by intercepting the prepare + run once.
+      const origPrepare = db.prepare.bind(db);
+      const mutator = db
+        .prepare(
+          `UPDATE proposal_submissions
+             SET status = ?, collateral_txid = ?, updated_at = ?
+           WHERE id = ? AND user_id = ?`
+        );
+      let planted = false;
+      db.prepare = (sql) => { // intercepts only the repo's in-call CAS UPDATE prepare
+        const stmt = origPrepare(sql);
+        if (!planted && /SET status = \?, collateral_txid = \?/.test(sql)) {
+          planted = true;
+          const originalRun = stmt.run.bind(stmt);
+          stmt.run = (...args) => {
+            // Plant row A's collateral_txid BEFORE the repo's own
+            // UPDATE fires. This mirrors the "second tick beat us to
+            // it" race that the partial unique index is there to
+            // catch.
+            mutator.run('awaiting_collateral', txid, Date.now(), a.id, user1);
+            return originalRun(...args);
+          };
+        }
+        return stmt;
+      };
+      try {
+        expectThrowsCode(
+          () => repo.attachCollateral(b.id, user2, txid),
+          'txid_already_used'
+        );
+      } finally {
+        db.prepare = origPrepare; // always restore, even if the assertion throws
+      }
+    }
+  );
+
+  test(
+    'CAS: concurrent status transition out of prepared is rejected, not silently overwritten (Codex round 4 P1)',
+    () => {
+      // Scenario: row S is 'prepared'. Two concurrent attach
+      // requests arrive with DIFFERENT txids. Both pass the
+      // pre-read status check. Before the prior fix, the later
+      // UPDATE would silently overwrite collateral_txid — binding
+      // the submission to the wrong collateral.
+      //
+      // With the CAS guard (AND status='prepared' in WHERE), the
+      // racer that flips status first wins; the other's UPDATE
+      // matches 0 rows and we throw `status_not_prepared` with
+      // the row's real current state intact.
+      const { db, repo, user1 } = setup();
+      const s = repo.create(validInput(user1));
+      const txidA = 'a'.repeat(64);
+      const txidB = 'b'.repeat(64);
+
+      // Prepare a writer that flips the row to awaiting_collateral
+      // with txidA — the simulated "other worker that beat us".
+      const flip = db.prepare(
+        `UPDATE proposal_submissions
+           SET status = ?, collateral_txid = ?, updated_at = ?
+         WHERE id = ?`
+      );
+
+      const origPrepare = db.prepare.bind(db);
+      let planted = false;
+      db.prepare = (sql) => {
+        const stmt = origPrepare(sql);
+        // Intercept only the repo's CAS UPDATE (now includes
+        // `AND status = ?`) — do NOT intercept `flip` above,
+        // which is a different SQL string.
+        if (
+          !planted &&
+          /SET status = \?, collateral_txid = \?/.test(sql) &&
+          /AND status = \?/.test(sql)
+        ) {
+          planted = true;
+          const originalRun = stmt.run.bind(stmt);
+          stmt.run = (...args) => {
+            flip.run('awaiting_collateral', txidA, Date.now(), s.id); // the racer wins just before our UPDATE
+            return originalRun(...args);
+          };
+        }
+        return stmt;
+      };
+
+      try {
+        expectThrowsCode(
+          () => repo.attachCollateral(s.id, user1, txidB),
+          'status_not_prepared'
+        );
+      } finally {
+        db.prepare = origPrepare;
+      }
+
+      // Critical invariant: the row's collateral_txid must STILL
+      // be the winner's (txidA), never the loser's (txidB).
+      const final = repo.getByIdForUser(s.id, user1);
+      expect(final.status).toBe(STATUS.AWAITING_COLLATERAL);
+      expect(final.collateralTxid).toBe(txidA);
+    }
+  );
+});
+
+// ---------------- partial unique index: per-user/dataHex/prepared ----
+describe('partial unique index: prepared rows are unique per (user_id, data_hex)', () => {
+  test(
+    'two prepared rows with the same user+dataHex are rejected at DB layer (Codex round 3 P2)',
+    () => {
+      const { repo, user1 } = setup();
+      repo.create(validInput(user1));
+      expect(() =>
+        repo.create(
+          validInput(user1, { proposalHash: 'b'.repeat(64) }) // different hash, same dataHex — still rejected
+        )
+      ).toThrow(/UNIQUE constraint failed/);
+    }
+  );
+
+  test(
+    'same user+dataHex is allowed once the first row has left prepared (status moves it out of the partial index)',
+    () => {
+      const { repo, user1 } = setup();
+      const a = repo.create(validInput(user1));
+      // Advance a past prepared so the partial index no longer covers it.
+      repo.attachCollateral(a.id, user1, 'f'.repeat(64));
+      const b = repo.create(
+        validInput(user1, { proposalHash: 'b'.repeat(64) })
+      );
+      expect(b.status).toBe('prepared');
+      expect(b.id).not.toBe(a.id);
+    }
+  );
+
+  test(
+    'two different users may each have a prepared row with the same dataHex',
+    () => {
+      const { repo, user1, user2 } = setup();
+      const a = repo.create(validInput(user1));
+      const b = repo.create(
+        validInput(user2, { proposalHash: 'b'.repeat(64) })
+      );
+      expect(a.id).not.toBe(b.id); // both inserts succeed: the index is scoped per user
+    }
+  );
+});
+
+// ---------------- updateConfirmations ----------------
+describe('updateConfirmations', () => {
+  test('bumps confs and updated_at', () => {
+    const { repo, user1, tick } = setup();
+    const s = repo.create(validInput(user1));
+    repo.attachCollateral(s.id, user1, 'a'.repeat(64));
+    tick(1000); // advance the injected clock so updated_at visibly changes
+    const after = repo.updateConfirmations(s.id, 4);
+    expect(after.collateralConfs).toBe(4);
+    expect(after.updatedAt).toBeGreaterThan(s.updatedAt);
+  });
+
+  test('rejects negative/non-integer confs', () => {
+    const { repo, user1 } = setup();
+    const s = repo.create(validInput(user1));
+    expectThrowsCode(() => repo.updateConfirmations(s.id, -1), 'confs_invalid');
+    expectThrowsCode(() => repo.updateConfirmations(s.id, 1.5), 'confs_invalid');
+  });
+});
+
+// ---------------- markSubmitted ----------------
+describe('markSubmitted', () => {
+  const txid = 'a'.repeat(64);
+  const govHash = 'b'.repeat(64);
+
+  function arrange() { // shared fixture: one submission already in awaiting_collateral
+    const ctx = setup();
+    const s = ctx.repo.create(validInput(ctx.user1));
+    ctx.repo.attachCollateral(s.id, ctx.user1, txid);
+    return { ...ctx, s };
+  }
+
+  test('awaiting_collateral → submitted, records governanceHash lowercase', () => {
+    const { repo, s } = arrange();
+    const out = repo.markSubmitted(s.id, { governanceHash: govHash.toUpperCase() }); // uppercase in, lowercase stored
+    expect(out.status).toBe(STATUS.SUBMITTED);
+    expect(out.governanceHash).toBe(govHash);
+  });
+
+  test('rejects from a non-awaiting status', () => {
+    const { repo, user1 } = setup();
+    const s = repo.create(validInput(user1)); // still 'prepared'
+    expectThrowsCode(
+      () => repo.markSubmitted(s.id, { governanceHash: govHash }),
+      'status_not_awaiting'
+    );
+  });
+
+  test('rejects an invalid governance hash', () => {
+    const { repo, s } = arrange();
+    expectThrowsCode(
+      () => repo.markSubmitted(s.id, { governanceHash: 'bad' }),
+      'governance_hash_invalid'
+    );
+  });
+
+  test('rejects a governance hash already recorded on another row', () => {
+    const { db, repo, user1, user2 } = setup();
+    const a = repo.create(validInput(user1));
+    const b = repo.create(validInput(user2, { proposalHash: 'c'.repeat(64) }));
+    repo.attachCollateral(a.id, user1, 'a'.repeat(64));
+    repo.attachCollateral(b.id, user2, 'd'.repeat(64));
+    repo.markSubmitted(a.id, { governanceHash: govHash }); // row A legitimately owns the hash
+    expectThrowsCode(
+      () => repo.markSubmitted(b.id, { governanceHash: govHash }),
+      'governance_hash_clash'
+    );
+    db.close();
+  });
+
+ test(
+ 'normalizes raw UNIQUE-constraint races on governance_hash to governance_hash_clash (Codex round 11 P2)',
+ () => {
+ // Regression for a race only reachable under concurrent
+ // dispatcher workers:
+ //
+ // 1. Worker A: reads row A (awaiting_collateral),
+ // pre-checks governance_hash clash → none. Pauses.
+ // 2. Worker B: reads row B, pre-checks clash → none,
+ // issues UPDATE to claim `gov_hash = X` → succeeds.
+ // 3. Worker A resumes and issues its UPDATE to claim the
+ // SAME `gov_hash = X`. The partial UNIQUE index on
+ // `governance_hash` rejects with SQLITE_CONSTRAINT_UNIQUE.
+ //
+ // Pre-round-11 that raw SQLite error bubbled up unwrapped.
+ // The dispatcher's duplicate-recovery branch only treats
+ // `code === 'governance_hash_clash'` as a terminal clash, so
+ // the raw error left `clashed=false`, the row stayed
+ // `awaiting_collateral`, and every subsequent dispatcher
+ // tick re-hit the same "already exists" from Core and looped
+ // forever with no terminal user-visible outcome.
+ //
+ // We can't faithfully reproduce the two-worker timing in an
+ // in-process test, but the end state is identical: a row
+ // owning `gov_hash = X` already exists AND the repo's
+ // pre-check lookup misses (the race window). Construct that
+ // by intercepting db.prepare BEFORE the repo is built so the
+ // cached `byGovHashStmt` returns null.
+ const db = openDatabase(':memory:');
+ const user1 = seedUser(db, 'a@x.com');
+ const user2 = seedUser(db, 'b@x.com');
+ // First repo: normal. Use it to set up row A owning the
+ // hash legitimately (so the UNIQUE index is populated).
+ const repoReal = createProposalSubmissionsRepo(db);
+ const a = repoReal.create(validInput(user1));
+ const b = repoReal.create(
+ validInput(user2, { proposalHash: 'c'.repeat(64) })
+ );
+ repoReal.attachCollateral(a.id, user1, 'a'.repeat(64));
+ repoReal.attachCollateral(b.id, user2, 'd'.repeat(64));
+ repoReal.markSubmitted(a.id, { governanceHash: govHash });
+ // Second repo: same db, but patched `prepare` forces the
+ // pre-check lookup to miss. UPDATE is now the boundary
+ // that raises the UNIQUE violation — the exact code path
+ // round-11 P2 added normalization for.
+ const origPrepare = db.prepare.bind(db);
+ const patchedDb = new Proxy(db, {
+ get(target, prop) {
+ if (prop === 'prepare') {
+ return (sql) => {
+ const stmt = origPrepare(sql);
+ if (
+ /FROM\s+proposal_submissions\s+WHERE\s+governance_hash\s*=\s*\?/i.test(
+ sql
+ )
+ ) {
+ return new Proxy(stmt, {
+ get(t, p) {
+ if (p === 'get') return () => null;
+ const v = t[p];
+ return typeof v === 'function' ? v.bind(t) : v;
+ },
+ });
+ }
+ return stmt;
+ };
+ }
+ const v = target[prop];
+ return typeof v === 'function' ? v.bind(target) : v;
+ },
+ });
+ const repoPatched = createProposalSubmissionsRepo(patchedDb);
+ expectThrowsCode(
+ () => repoPatched.markSubmitted(b.id, { governanceHash: govHash }),
+ 'governance_hash_clash'
+ );
+ db.close();
+ }
+ );
+
+ test(
+ 'CAS: concurrent status transition out of awaiting_collateral is a no-op, not a duplicate submit (Codex round 5 P1)',
+ () => {
+ // Scenario: two dispatcher workers both pick up the same
+ // awaiting_collateral row. Both call rpc.gObjectSubmit, both
+ // (eventually) reach markSubmitted. Before the round-5 CAS
+ // guard the UPDATE only filtered by id, so BOTH would pass
+ // their pre-read status check AND BOTH would commit their
+ // UPDATE — each treating the transition as its own success
+ // and each firing onSubmitted (duplicate "submitted" emails).
+ //
+ // With `AND status = 'awaiting_collateral'` folded into the
+ // WHERE clause, only the first UPDATE changes a row; the
+ // second matches zero rows and we return null so the
+ // dispatcher's `if (submittedRow)` guard skips the second
+ // hook fire. The winner keeps the emit-once role.
+ const { db, repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.attachCollateral(s.id, user1, 'a'.repeat(64));
+
+ // Prepare a direct writer that flips the row to 'submitted'
+ // with the SAME governance hash — the simulated winner.
+ const flip = db.prepare(
+ `UPDATE proposal_submissions
+ SET status = ?, governance_hash = ?, updated_at = ?
+ WHERE id = ?`
+ );
+
+ const origPrepare = db.prepare.bind(db);
+ let planted = false;
+ db.prepare = (sql) => {
+ const stmt = origPrepare(sql);
+ // Intercept only the repo's CAS UPDATE (contains the
+ // governance_hash set and the `AND status = ?` guard).
+ if (
+ !planted &&
+ /SET status = \?, governance_hash = \?/.test(sql) &&
+ /AND status = \?/.test(sql)
+ ) {
+ planted = true;
+ const originalRun = stmt.run.bind(stmt);
+ stmt.run = (...args) => {
+ flip.run('submitted', govHash, Date.now(), s.id);
+ return originalRun(...args);
+ };
+ }
+ return stmt;
+ };
+
+ let out;
+ try {
+ out = repo.markSubmitted(s.id, { governanceHash: govHash });
+ } finally {
+ db.prepare = origPrepare;
+ }
+
+ // Critical: the losing worker got null, NOT a throw and NOT
+ // a fake-success row. Dispatcher code uses `if (submittedRow)`
+ // before firing onSubmitted, so null correctly skips the
+ // duplicate hook fire.
+ expect(out).toBeNull();
+
+ // And the row still reflects the winner's write — same hash,
+ // same terminal status.
+ const final = repo.getById(s.id);
+ expect(final.status).toBe(STATUS.SUBMITTED);
+ expect(final.governanceHash).toBe(govHash);
+ }
+ );
+});
+
+// ---------------- markFailed ----------------
+describe('markFailed', () => {
+ test('can fail from prepared', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ const out = repo.markFailed(s.id, {
+ reason: 'canceled',
+ detail: 'user aborted',
+ });
+ expect(out.status).toBe(STATUS.FAILED);
+ expect(out.failReason).toBe('canceled');
+ expect(out.failDetail).toBe('user aborted');
+ });
+
+ test('can fail from awaiting_collateral', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.attachCollateral(s.id, user1, 'a'.repeat(64));
+ const out = repo.markFailed(s.id, { reason: 'confirm_timeout' });
+ expect(out.status).toBe(STATUS.FAILED);
+ });
+
+ test('cannot fail a submitted row', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.attachCollateral(s.id, user1, 'a'.repeat(64));
+ repo.markSubmitted(s.id, { governanceHash: 'b'.repeat(64) });
+ expectThrowsCode(
+ () => repo.markFailed(s.id, { reason: 'late_fail' }),
+ 'status_terminal'
+ );
+ });
+
+ test('cannot fail an already-failed row (no double-writes)', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.markFailed(s.id, { reason: 'canceled' });
+ expectThrowsCode(
+ () => repo.markFailed(s.id, { reason: 'canceled' }),
+ 'status_terminal'
+ );
+ });
+
+ test(
+ 'CAS: concurrent status transition into submitted is NOT overwritten back to failed (Codex round 7 P1)',
+ () => {
+ // Scenario: a dispatcher worker takes the "terminal reject"
+ // path in proposalDispatcher.js (e.g. gObjectSubmit throws a
+ // validation-ish error) and calls markFailed. Between the
+ // pre-read status check and the UPDATE, a sibling worker has
+ // already flipped the same row to `submitted` (via
+ // markSubmitted's CAS). Before the round-7 CAS guard the
+ // UPDATE only filtered by id, so THIS call would stomp the
+ // winning `submitted` row back to `failed` — corrupting the
+ // terminal state and triggering onFailed side effects
+ // (wrong email, wrong UI) for a row that actually went live
+ // on-chain.
+ //
+ // With `AND status NOT IN ('submitted', 'failed')` folded
+ // into the WHERE clause, the UPDATE matches zero rows; we
+ // return null so the dispatcher's `if (failedRow)` guard
+ // skips the onFailed hook and the winner keeps emit-once.
+ const { db, repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.attachCollateral(s.id, user1, 'a'.repeat(64));
+
+ // Direct writer that races the row to `submitted` AFTER the
+ // markFailed pre-read returns `awaiting_collateral` but
+ // BEFORE the CAS UPDATE runs. Same pattern as the R5 test.
+ const flip = db.prepare(
+ `UPDATE proposal_submissions
+ SET status = ?, governance_hash = ?, updated_at = ?
+ WHERE id = ?`
+ );
+
+ const origPrepare = db.prepare.bind(db);
+ let planted = false;
+ db.prepare = (sql) => {
+ const stmt = origPrepare(sql);
+ // Match the markFailed CAS UPDATE (sets fail_reason AND has
+ // the `AND status NOT IN (?, ?)` guard).
+ if (
+ !planted &&
+ /SET status = \?, fail_reason = \?/.test(sql) &&
+ /AND status NOT IN \(\?, \?\)/.test(sql)
+ ) {
+ planted = true;
+ const originalRun = stmt.run.bind(stmt);
+ stmt.run = (...args) => {
+ flip.run('submitted', 'b'.repeat(64), Date.now(), s.id);
+ return originalRun(...args);
+ };
+ }
+ return stmt;
+ };
+
+ let out;
+ try {
+ out = repo.markFailed(s.id, { reason: 'submit_rejected' });
+ } finally {
+ db.prepare = origPrepare;
+ }
+
+ // Losing worker got null — NOT a throw and NOT a fake-success
+ // row. Dispatcher's `if (failedRow)` will skip onFailed so
+ // the winner's onSubmitted is the only emitted side effect.
+ expect(out).toBeNull();
+
+ // Row still reflects the winner's write.
+ const final = repo.getById(s.id);
+ expect(final.status).toBe(STATUS.SUBMITTED);
+ expect(final.governanceHash).toBe('b'.repeat(64));
+ expect(final.failReason).toBeNull();
+ }
+ );
+});
+
+// ---------------- remove ----------------
+describe('remove', () => {
+ test('removes a prepared row', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ expect(repo.remove(s.id, user1)).toBe(1);
+ expect(repo.getById(s.id)).toBeNull();
+ });
+
+ test('removes a failed row', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.markFailed(s.id, { reason: 'canceled' });
+ expect(repo.remove(s.id, user1)).toBe(1);
+ });
+
+ test('refuses to remove awaiting_collateral (confirmations in flight)', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.attachCollateral(s.id, user1, 'a'.repeat(64));
+ expect(repo.remove(s.id, user1)).toBe(0);
+ expect(repo.getById(s.id)).not.toBeNull();
+ });
+
+ test('refuses to remove submitted (permanent record)', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.attachCollateral(s.id, user1, 'a'.repeat(64));
+ repo.markSubmitted(s.id, { governanceHash: 'b'.repeat(64) });
+ expect(repo.remove(s.id, user1)).toBe(0);
+ });
+
+ test('isolates users', () => {
+ const { repo, user1, user2 } = setup();
+ const s = repo.create(validInput(user1));
+ expect(repo.remove(s.id, user2)).toBe(0);
+ expect(repo.getById(s.id)).not.toBeNull();
+ });
+});
+
+// ---------------- listByStatus / finders ----------------
+describe('finders', () => {
+ test('listByStatus returns rows matching a given status', () => {
+ const { repo, user1 } = setup();
+ const a = repo.create(validInput(user1));
+ // Different dataHex to avoid the partial-unique-index for prepared
+ // rows (Codex round 3 P2): same user may not hold two prepared
+ // rows for identical canonical payloads.
+ const b = repo.create(
+ validInput(user1, {
+ proposalHash: 'd'.repeat(64),
+ dataHex: '7b2274797065223a327d', // {"type":2}
+ })
+ );
+ repo.attachCollateral(b.id, user1, 'c'.repeat(64));
+ expect(repo.listByStatus(STATUS.PREPARED).map((r) => r.id)).toEqual([a.id]);
+ expect(repo.listByStatus(STATUS.AWAITING_COLLATERAL).map((r) => r.id)).toEqual([
+ b.id,
+ ]);
+ });
+
+ test('listByStatus rejects unknown status', () => {
+ const { repo } = setup();
+ expectThrowsCode(() => repo.listByStatus('bogus'), 'status_invalid');
+ });
+
+ test('findByCollateralTxid finds a row by txid (lowercased)', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.attachCollateral(s.id, user1, 'A'.repeat(64));
+ expect(repo.findByCollateralTxid('a'.repeat(64)).id).toBe(s.id);
+ expect(repo.findByCollateralTxid('nonexistent')).toBeNull();
+ });
+
+ test('findByGovernanceHash finds a row by gov hash (lowercased)', () => {
+ const { repo, user1 } = setup();
+ const s = repo.create(validInput(user1));
+ repo.attachCollateral(s.id, user1, 'a'.repeat(64));
+ repo.markSubmitted(s.id, { governanceHash: 'B'.repeat(64) });
+ expect(repo.findByGovernanceHash('b'.repeat(64)).id).toBe(s.id);
+ });
+
+ test('findByProposalHashForUser scopes to owner', () => {
+ const { repo, user1, user2 } = setup();
+ const s = repo.create(validInput(user1));
+ expect(repo.findByProposalHashForUser(user1, s.proposalHash).id).toBe(s.id);
+ expect(
+ repo.findByProposalHashForUser(user2, s.proposalHash)
+ ).toBeNull();
+ });
+
+ test('findPreparedByDataHexForUser returns only prepared rows, scoped to owner (Codex round 2 P1)', () => {
+ const { repo, user1, user2 } = setup();
+ // user1: one prepared row.
+ const a = repo.create(validInput(user1));
+ // Same user, same dataHex — if we ever allowed a duplicate row,
+ // the ORDER BY created_at DESC picks the newest. We assert only
+ // that we get back a prepared row for (user, dataHex).
+ expect(
+ repo.findPreparedByDataHexForUser(user1, a.dataHex).id
+ ).toBe(a.id);
+ // Different user, same dataHex → no cross-tenant leak.
+ expect(
+ repo.findPreparedByDataHexForUser(user2, a.dataHex)
+ ).toBeNull();
+ // Unknown dataHex → null.
+ expect(
+ repo.findPreparedByDataHexForUser(user1, 'deadbeef')
+ ).toBeNull();
+ // After the row moves out of `prepared`, we no longer return it —
+ // the idempotency check in /prepare must NOT collide against
+ // awaiting_collateral / submitted / failed rows.
+ repo.attachCollateral(a.id, user1, 'd'.repeat(64));
+ expect(
+ repo.findPreparedByDataHexForUser(user1, a.dataHex)
+ ).toBeNull();
+ });
+});
+
+// ---------------- cascade ----------------
+describe('cascade on user delete', () => {
+ test('submissions for a deleted user are removed', () => {
+ const { db, repo, user1 } = setup();
+ repo.create(validInput(user1));
+ db.prepare('DELETE FROM users WHERE id = ?').run(user1);
+ expect(repo.listForUser(user1)).toEqual([]);
+ });
+});
diff --git a/lib/proposalValidate.js b/lib/proposalValidate.js
new file mode 100644
index 0000000..07e6c4b
--- /dev/null
+++ b/lib/proposalValidate.js
@@ -0,0 +1,553 @@
+'use strict';
+
+// Proposal validation + canonicalization.
+//
+// Responsibilities:
+// 1. canonicalize(input) — Normalize user input into a stable, byte-
+// deterministic dataHex. The hash commits to
+// EXACTLY these bytes, so any non-determinism
+// here (JS's default `JSON.stringify` number
+// representation, map key order, locale) would
+// break the proposal at submit time. We
+// serialize the JSON by hand.
+// 2. validateStructural() — Catch every failure mode that Syscoin Core's
+// `CProposalValidator` enforces, plus a few
+// stricter UX rules (requiring a URL scheme;
+// sane epoch bounds) so users never learn
+// about an issue for the first time after
+// paying 150 SYS.
+//
+// On-chain validation (`gObject_check`) is called SEPARATELY from the
+// route layer after structural checks pass; see routes/govProposals.js.
+// Keeping the RPC call out of this module lets tests stay pure.
+//
+// Error shape:
+// { ok: false, issues: [{ field, code, message }] }
+// where `code` is a machine-stable key the frontend maps to copy, and
+// `message` is a human-readable fallback. Multiple issues are returned
+// in one call so the UI can highlight every broken field at once.
+//
+// References (syscoin tag v4.x):
+// src/governance/governancevalidators.cpp
+// - MAX_DATA_SIZE = 512 — whole payload, post-hex-decode
+// - MAX_NAME_SIZE = 40 — after-lowercase length
+// - name charset — "-_abcdefghijklmnopqrstuvwxyz0123456789"
+// - ValidateStartEndEpoch — end > start
+// - ValidatePaymentAmount — double > 0
+// - ValidatePaymentAddress — DecodeDestination (we defer to RPC)
+// - ValidateURL — len >= 4, no whitespace, CheckURL
+// src/governance/governanceobject.h
+// - GOVERNANCE_OBJECT_PROPOSAL = 1
+
+const MAX_DATA_SIZE = 512;
+const MAX_NAME_SIZE = 40;
+const MIN_URL_SIZE = 4;
+
+const SATS_PER_SYS = 100000000n;
+
+// Codex PR8 round 15 P2: SQLite `INTEGER` is stored as a signed
+// 64-bit value, and `proposal_submissions.payment_amount_sats` uses
+// that storage class. Anything at or above 2^63 wraps into the
+// negative range on write and surfaces as a generic 500 at
+// `POST /gov/proposals/prepare` instead of a deterministic 400
+// validation error — the client then has no structured reason to
+// correct the input. Enforce the int64 ceiling here as a hard
+// validation gate so over-range amounts return `amount_too_large`
+// alongside the other structural issues. Note: Syscoin's total
+// supply cap is ~9 * 10^16 sats, so any legitimate proposal is
+// comfortably below this bound; the check exists purely to turn
+// an engine-level integer overflow into a user-actionable error.
+const MAX_PAYMENT_AMOUNT_SATS = 2n ** 63n - 1n;
+const NAME_ALLOWED_RE = /^[-_a-z0-9]+$/;
+const NAME_SANITIZE_RE = /[^-_a-z0-9]/g;
+
+// Minimal address sanity check. Syscoin addresses are either bech32
+// (sys1...) or base58 (S.../s.../3.../...). Full validation requires
+// decoding + checksum verification; we delegate that to `gObject_check`
+// on the backend node. Here we only weed out obvious garbage so we
+// fail fast before the RPC round-trip.
+const ADDRESS_RE = /^[A-Za-z0-9]{20,100}$/;
+
+// We are strictly stricter than Core's CheckURL on purpose: a
+// governance proposal URL that users paste into their wallet is
+// social-engineering-adjacent, and Core will accept `javascript:foo`
+// happily. We require an http(s) scheme and no whitespace.
+const URL_SCHEME_RE = /^https?:\/\/[^\s]{3,}$/i;
+
+// Type 1 = GOVERNANCE_OBJECT_PROPOSAL. We never emit anything else.
+const PROPOSAL_TYPE = 1;
+
+// --- Canonicalization --------------------------------------------------
+
+// Format a satoshi amount as the minimal-precision SYS decimal string
+// the JSON will contain. Never uses scientific notation. We bypass
+// `JSON.stringify` for this number specifically because node's
+// formatter emits "1e-8" for very small floats, which — while valid
+// JSON — would differ byte-for-byte depending on how the number was
+// constructed, and that would propagate into the hash.
+//
+// Examples:
+// 0n -> "0" (rejected upstream; we format anyway)
+// 100_000_000n -> "1"
+// 42_500_000_000n -> "425"
+// 4_250_000_000n -> "42.5"
+// 1n -> "0.00000001"
+// 1_000n -> "0.00001"
+function formatSysAmount(sats) {
+ const s = BigInt(sats);
+ const neg = s < 0n;
+ const abs = neg ? -s : s;
+ const whole = abs / SATS_PER_SYS;
+ const frac = abs % SATS_PER_SYS;
+ let out;
+ if (frac === 0n) {
+ out = whole.toString();
+ } else {
+ const fracStr = frac.toString().padStart(8, '0').replace(/0+$/, '');
+ out = `${whole}.${fracStr}`;
+ }
+ return neg ? `-${out}` : out;
+}
+
+// Case-insensitive input is accepted (user types "Test-Name"); Core
+// stores the lowercased form. We lowercase AND strip disallowed chars
+// so the canonical form matches what Core would compute internally.
+// The UI previews the sanitized form beside the raw input so the user
+// isn't surprised by what lands on chain.
+function sanitizeName(raw) {
+ if (typeof raw !== 'string') return '';
+ return raw.trim().toLowerCase().replace(NAME_SANITIZE_RE, '');
+}
+
+// Accept either SYS (string or number with decimals) or sats (bigint-
+// safe integer string) as input and normalize to sats. We prefer
+// string input at the API boundary because JS numbers lose precision
+// past 2^53.
+//
+// Returns bigint (sats) on success, throws on invalid.
+// Render a JS number as a decimal string WITHOUT scientific notation,
+// using the exact digits from its shortest round-trip form. This is
+// precision-preserving: `(0.00000001).toString()` is the engine's
+// chosen shortest-unambiguous decimal for the float, and we only
+// shift the decimal point per the exponent. No rounding, no digit
+// addition — the resulting string has exactly the same significant
+// digits as `n.toString()`. This lets the decimal-places check below
+// reject over-precision inputs (e.g. `0.000000009`) the same way a
+// string-input `"0.000000009"` would be rejected, instead of silently
+// rounding to 1 sat. Handles the full range of `Number.toString`
+// exponent forms (`1e-8`, `1.5e+21`, etc.).
+function numberToDecimalString(n) {
+ const s = n.toString();
+ // `e`-less forms pass straight through — the canonical case is
+ // already decimal (e.g. `1`, `1.5`, `-3.14`, `100`). JS emits
+ // exponent form only for |x| < 1e-6 or |x| >= 1e21.
+ if (!/[eE]/.test(s)) return s;
+ const m = s.match(/^(-?)(\d+)(?:\.(\d+))?[eE]([+-]?\d+)$/);
+ if (!m) {
+ // Unreachable for finite JS numbers, but guard defensively.
+ throw new Error('payment_amount has unparseable numeric form');
+ }
+ const sign = m[1];
+ const whole = m[2];
+ const frac = m[3] || '';
+ const exp = parseInt(m[4], 10);
+ const digits = whole + frac;
+ // Position of the decimal point in `digits`, counted from left.
+ // `whole.length + exp` = where the point lands after shifting.
+ const pointPos = whole.length + exp;
+ let out;
+ if (pointPos <= 0) {
+ out = '0.' + '0'.repeat(-pointPos) + digits;
+ } else if (pointPos >= digits.length) {
+ out = digits + '0'.repeat(pointPos - digits.length);
+ } else {
+ out = digits.slice(0, pointPos) + '.' + digits.slice(pointPos);
+ }
+ return sign + out;
+}
+
+function parsePaymentAmountToSats(input) {
+ if (typeof input === 'bigint') return input;
+ if (typeof input === 'number') {
+ if (!Number.isFinite(input)) throw new Error('payment_amount must be finite');
+ // Codex PR8 round 13 P2: JS `Number.toString()` falls back to
+ // scientific notation for both very small (|x| < 1e-6) and
+ // very large (|x| >= 1e21) magnitudes, which the decimal-only
+ // regex below would otherwise reject (e.g. `1e-8` — a valid
+ // 1-sat amount). Codex PR8 round 14 P1: earlier we routed
+ // through `toFixed(8)` to paper over the exponent form, but
+ // toFixed ROUNDS — `0.000000009` (9 decimals, should be
+ // rejected) became `"0.00000001"` = 1 sat silently. Correct
+ // fix is a non-mutating decimal converter that preserves the
+ // number's exact significant digits so the string path's
+ // `<= 8 decimals` check is the sole precision gate. Result:
+ // numeric `0.00000001` → `"0.00000001"` (accepted, 1 sat),
+ // numeric `0.000000009` → `"0.000000009"` (rejected, >8 dec).
+ return parsePaymentAmountToSats(numberToDecimalString(input));
+ }
+ if (typeof input !== 'string') {
+ throw new Error('payment_amount must be a number or string');
+ }
+ const s = input.trim();
+ if (!/^-?\d+(?:\.\d+)?$/.test(s)) {
+ throw new Error('payment_amount is not a decimal number');
+ }
+ const neg = s.startsWith('-');
+ const abs = neg ? s.slice(1) : s;
+ const [whole, frac = ''] = abs.split('.');
+ if (frac.length > 8) {
+ throw new Error('payment_amount has more than 8 decimal places');
+ }
+ const fracPadded = frac.padEnd(8, '0');
+ const sats = BigInt(whole) * SATS_PER_SYS + BigInt(fracPadded);
+ return neg ? -sats : sats;
+}
+
+// Escape per JSON spec (RFC 8259): quotes, backslash, control chars.
+// Unlike the number formatter, we defer to JSON.stringify here:
+// since ES2019 ("well-formed JSON.stringify") its string output is
+// fully spec-determined — fixed ASCII escapes, non-ASCII characters
+// emitted raw (never \uXXXX), and only lone surrogates escaped.
+// That makes the byte form deterministic in this runtime and any
+// other spec-compliant engine.
+function jsonString(s) {
+ return JSON.stringify(String(s));
+}
+
+// Build the canonical JSON payload. Order is fixed; omit no fields.
+// ANY change to this function (new field, different order, different
+// number format) will produce different bytes and therefore a
+// different proposal hash. Do NOT edit casually.
+function buildCanonicalJSON(p) {
+ // Epochs are emitted as integers. normalizeInput already trunc'd
+ // them to integers, but we coerce again to defend against callers
+ // that assemble the payload manually in tests.
+ const startEpoch = Math.trunc(Number(p.start_epoch));
+ const endEpoch = Math.trunc(Number(p.end_epoch));
+ return (
+ '{' +
+ `"type":${PROPOSAL_TYPE}` +
+ `,"name":${jsonString(p.name)}` +
+ `,"start_epoch":${startEpoch}` +
+ `,"end_epoch":${endEpoch}` +
+ `,"payment_address":${jsonString(p.payment_address)}` +
+ `,"payment_amount":${formatSysAmount(p.payment_amount_sats)}` +
+ `,"url":${jsonString(p.url)}` +
+ '}'
+ );
+}
+
+// Turn loose user input into a normalized payload. Returns the same
+// object shape the route will store/return; does NOT validate. (This
+// lets the wizard preview a "what the chain will see" section even
+// while the user is still editing and fields might be bad.)
+function normalizeInput(raw) {
+ const obj = raw || {};
+ const normalized = {
+ name: sanitizeName(obj.name ?? obj.title ?? ''),
+ start_epoch: Number.isFinite(Number(obj.start_epoch))
+ ? Math.trunc(Number(obj.start_epoch))
+ : null,
+ end_epoch: Number.isFinite(Number(obj.end_epoch))
+ ? Math.trunc(Number(obj.end_epoch))
+ : null,
+ payment_address:
+ typeof obj.payment_address === 'string' ? obj.payment_address.trim() : '',
+ url: typeof obj.url === 'string' ? obj.url.trim() : '',
+ // payment_amount_sats is the bigint-safe integer form. Callers
+ // that pass SYS decimals go through parsePaymentAmountToSats.
+ payment_amount_sats: null,
+ };
+ try {
+ if (obj.payment_amount_sats !== undefined) {
+ normalized.payment_amount_sats = BigInt(obj.payment_amount_sats);
+ } else if (obj.payment_amount !== undefined) {
+ normalized.payment_amount_sats = parsePaymentAmountToSats(
+ obj.payment_amount
+ );
+ } else {
+ normalized.payment_amount_sats = 0n;
+ }
+ } catch {
+ normalized.payment_amount_sats = null; // signal unparsable; structural validator will flag
+ }
+ return normalized;
+}
+
+// Main entry point: from user input, produce the canonical form plus
+// the hex-encoded payload. Throws only on truly unrecoverable shape
+// issues (missing required strings); everything else flows through
+// validateStructural so the UI can show field-level errors.
+function canonicalize(raw) {
+ const payload = normalizeInput(raw);
+ // Defensive: if payment_amount_sats failed to parse, use 0 so we
+ // can still produce a deterministic (albeit invalid) payload the
+ // validator will flag.
+ const payloadForJSON = {
+ ...payload,
+ payment_amount_sats:
+ typeof payload.payment_amount_sats === 'bigint'
+ ? payload.payment_amount_sats
+ : 0n,
+ start_epoch: payload.start_epoch ?? 0,
+ end_epoch: payload.end_epoch ?? 0,
+ };
+ const json = buildCanonicalJSON(payloadForJSON);
+ const dataHex = Buffer.from(json, 'utf8').toString('hex');
+ return {
+ payload,
+ json,
+ dataHex,
+ byteLength: Buffer.byteLength(json, 'utf8'),
+ };
+}
+
+// --- Validation -------------------------------------------------------
+
+function issue(field, code, message) {
+ return { field, code, message };
+}
+
+function validateStructural(canon, { nowSeconds } = {}) {
+ const issues = [];
+ const { payload, byteLength } = canon;
+
+ // Name
+ if (!payload.name) {
+ issues.push(issue('name', 'name_required', 'Name is required.'));
+ } else if (payload.name.length > MAX_NAME_SIZE) {
+ issues.push(
+ issue(
+ 'name',
+ 'name_too_long',
+ `Name must be ${MAX_NAME_SIZE} characters or fewer.`
+ )
+ );
+ } else if (!NAME_ALLOWED_RE.test(payload.name)) {
+ issues.push(
+ issue(
+ 'name',
+ 'name_invalid_chars',
+ 'Name may only contain lowercase letters, digits, dashes, and underscores.'
+ )
+ );
+ }
+
+ // Epochs
+ if (!Number.isInteger(payload.start_epoch) || payload.start_epoch <= 0) {
+ issues.push(
+ issue('start_epoch', 'epoch_missing', 'A valid start time is required.')
+ );
+ }
+ if (!Number.isInteger(payload.end_epoch) || payload.end_epoch <= 0) {
+ issues.push(
+ issue('end_epoch', 'epoch_missing', 'A valid end time is required.')
+ );
+ }
+ if (
+ Number.isInteger(payload.start_epoch) &&
+ Number.isInteger(payload.end_epoch) &&
+ payload.end_epoch <= payload.start_epoch
+ ) {
+ issues.push(
+ issue(
+ 'end_epoch',
+ 'epoch_order',
+ 'End time must be after the start time.'
+ )
+ );
+ }
+ // Reject obviously-expired end_epoch when nowSeconds is provided.
+ // Core enforces this in gObject_check (fCheckExpiration=true), but
+ // doing it here gives a nicer error and removes an RPC round-trip.
+ if (
+ Number.isInteger(payload.end_epoch) &&
+ Number.isInteger(nowSeconds) &&
+ payload.end_epoch <= nowSeconds
+ ) {
+ issues.push(
+ issue(
+ 'end_epoch',
+ 'epoch_past',
+ 'End time is in the past; proposals must close in the future.'
+ )
+ );
+ }
+
+ // Amount
+ if (
+ typeof payload.payment_amount_sats !== 'bigint' ||
+ payload.payment_amount_sats <= 0n
+ ) {
+ issues.push(
+ issue(
+ 'payment_amount',
+ 'amount_not_positive',
+ 'Payment amount must be greater than zero.'
+ )
+ );
+ } else if (payload.payment_amount_sats > MAX_PAYMENT_AMOUNT_SATS) {
+ // See comment on MAX_PAYMENT_AMOUNT_SATS — rejecting here turns
+ // a SQLite int64 overflow-on-write (generic 500) into a clean,
+ // repeatable 400 the user can actually correct. We keep the
+ // `amount_not_positive` branch separate so the frontend can
+ // map each failure mode to distinct copy.
+ issues.push(
+ issue(
+ 'payment_amount',
+ 'amount_too_large',
+ 'Payment amount exceeds the maximum supported value.'
+ )
+ );
+ }
+
+ // Address (sanity — real validation at RPC)
+ if (!payload.payment_address) {
+ issues.push(
+ issue('payment_address', 'address_required', 'Payment address is required.')
+ );
+ } else if (/\s/.test(payload.payment_address)) {
+ issues.push(
+ issue(
+ 'payment_address',
+ 'address_whitespace',
+ 'Payment address cannot contain spaces.'
+ )
+ );
+ } else if (!ADDRESS_RE.test(payload.payment_address)) {
+ issues.push(
+ issue(
+ 'payment_address',
+ 'address_invalid',
+ 'Payment address doesn\u2019t look like a valid Syscoin address.'
+ )
+ );
+ }
+
+ // URL
+ if (!payload.url) {
+ issues.push(issue('url', 'url_required', 'A URL is required.'));
+ } else if (/\s/.test(payload.url)) {
+ issues.push(issue('url', 'url_whitespace', 'URL cannot contain spaces.'));
+ } else if (payload.url.length < MIN_URL_SIZE) {
+ issues.push(issue('url', 'url_too_short', 'URL is too short.'));
+ } else if (!URL_SCHEME_RE.test(payload.url)) {
+ issues.push(
+ issue(
+ 'url',
+ 'url_scheme',
+ 'URL must start with http:// or https://.'
+ )
+ );
+ }
+
+ // Payload size — hard consensus limit. This is the reason the UI
+ // must show a byte counter in the Review step.
+ if (byteLength > MAX_DATA_SIZE) {
+ issues.push(
+ issue(
+ '_payload',
+ 'payload_too_large',
+ `Proposal exceeds the 512-byte on-chain limit (currently ${byteLength} bytes). Shorten the name, URL, or address.`
+ )
+ );
+ }
+
+ return issues.length === 0
+ ? { ok: true, issues: [] }
+ : { ok: false, issues };
+}
+
+// Maps Core's freeform "Invalid X;Invalid Y" error strings to our
+// structured codes. Core concatenates reasons with ";" so multiple
+// failures can come back in one message.
+function parseCoreRejectMessage(raw) {
+ const msg = String(raw || '');
+ const out = [];
+ if (/name exceeds/i.test(msg))
+ out.push(issue('name', 'name_too_long', 'Name exceeds 40 characters.'));
+ if (/name.*empty/i.test(msg))
+ out.push(issue('name', 'name_required', 'Name cannot be empty.'));
+ if (/name contains invalid/i.test(msg))
+ out.push(
+ issue('name', 'name_invalid_chars', 'Name contains invalid characters.')
+ );
+ if (/start_epoch|end_epoch|end_epoch <= start_epoch/i.test(msg))
+ out.push(
+ issue(
+ 'end_epoch',
+ 'epoch_order',
+ 'Start/end times are invalid or out of order.'
+ )
+ );
+ if (/expired/i.test(msg))
+ out.push(
+ issue('end_epoch', 'epoch_past', 'End time must be in the future.')
+ );
+ if (/payment_amount is negative|payment_amount.*not found/i.test(msg))
+ out.push(
+ issue(
+ 'payment_amount',
+ 'amount_not_positive',
+ 'Payment amount must be greater than zero.'
+ )
+ );
+ if (
+ /payment_address is invalid|payment_address.*not found|payment_address can't have whitespaces/i.test(
+ msg
+ )
+ )
+ out.push(
+ issue(
+ 'payment_address',
+ 'address_invalid',
+ 'Payment address is not a valid Syscoin address.'
+ )
+ );
+ if (/script addresses are not supported/i.test(msg))
+ out.push(
+ issue(
+ 'payment_address',
+ 'address_script_type',
+ 'This type of address cannot receive governance payments.'
+ )
+ );
+ if (/url.*whitespaces|url too short|url invalid|url.*not found/i.test(msg))
+ out.push(
+ issue('url', 'url_invalid', 'URL is not valid.')
+ );
+ if (/data exceeds/i.test(msg))
+ out.push(
+ issue(
+ '_payload',
+ 'payload_too_large',
+ 'Proposal exceeds the 512-byte on-chain limit.'
+ )
+ );
+ if (/type is not 1|type field not found/i.test(msg))
+ out.push(issue('_payload', 'type_invalid', 'Invalid proposal type.'));
+
+ // If we couldn't classify, surface the raw message as a catch-all so
+ // the user at least sees something actionable.
+ if (out.length === 0 && msg.trim()) {
+ out.push(issue('_payload', 'core_rejected', msg.trim()));
+ }
+ return out;
+}
+
+module.exports = {
+ MAX_DATA_SIZE,
+ MAX_NAME_SIZE,
+ MIN_URL_SIZE,
+ SATS_PER_SYS,
+ MAX_PAYMENT_AMOUNT_SATS,
+ PROPOSAL_TYPE,
+ formatSysAmount,
+ sanitizeName,
+ parsePaymentAmountToSats,
+ normalizeInput,
+ canonicalize,
+ buildCanonicalJSON,
+ validateStructural,
+ parseCoreRejectMessage,
+};
diff --git a/lib/proposalValidate.test.js b/lib/proposalValidate.test.js
new file mode 100644
index 0000000..9b83200
--- /dev/null
+++ b/lib/proposalValidate.test.js
@@ -0,0 +1,562 @@
+'use strict';
+
+const {
+ MAX_DATA_SIZE,
+ MAX_NAME_SIZE,
+ SATS_PER_SYS,
+ formatSysAmount,
+ sanitizeName,
+ parsePaymentAmountToSats,
+ normalizeInput,
+ canonicalize,
+ buildCanonicalJSON,
+ validateStructural,
+ parseCoreRejectMessage,
+} = require('./proposalValidate');
+
+const validInput = {
+ name: 'Test-Proposal',
+ url: 'https://example.org/p',
+ payment_address: 'sys1q9h6mlnq2mwmlyyz4wa3q69lzq7h6mlsfqsp7mt',
+ payment_amount: '42.5',
+ start_epoch: 1800000000,
+ end_epoch: 1802592000,
+};
+
+// -----------------------------------------------------------------------
+// formatSysAmount — canonical decimal representation
+// -----------------------------------------------------------------------
+describe('formatSysAmount', () => {
+ test('whole SYS has no decimal point', () => {
+ expect(formatSysAmount(0n)).toBe('0');
+ expect(formatSysAmount(SATS_PER_SYS)).toBe('1');
+ expect(formatSysAmount(150n * SATS_PER_SYS)).toBe('150');
+ });
+
+ test('fractional SYS drops trailing zeros', () => {
+ expect(formatSysAmount(10000000n)).toBe('0.1');
+ expect(formatSysAmount(50000000n)).toBe('0.5');
+ expect(formatSysAmount(4250000000n)).toBe('42.5');
+ });
+
+ test('one satoshi is 0.00000001, never scientific', () => {
+ expect(formatSysAmount(1n)).toBe('0.00000001');
+ expect(formatSysAmount(10n)).toBe('0.0000001');
+ });
+
+ test('huge values don\u2019t overflow', () => {
+ const big = BigInt('99999999') * SATS_PER_SYS;
+ expect(formatSysAmount(big)).toBe('99999999');
+ });
+
+ test('accepts number/string inputs by coercion to BigInt', () => {
+ expect(formatSysAmount(100000000)).toBe('1');
+ expect(formatSysAmount('100000000')).toBe('1');
+ });
+});
+
+// -----------------------------------------------------------------------
+// parsePaymentAmountToSats — bigint-safe SYS -> sats
+// -----------------------------------------------------------------------
+describe('parsePaymentAmountToSats', () => {
+ test('simple whole SYS', () => {
+ expect(parsePaymentAmountToSats('1')).toBe(SATS_PER_SYS);
+ expect(parsePaymentAmountToSats(1)).toBe(SATS_PER_SYS);
+ });
+
+ test('fractional SYS', () => {
+ expect(parsePaymentAmountToSats('42.5')).toBe(4250000000n);
+ expect(parsePaymentAmountToSats('0.1')).toBe(10000000n);
+ expect(parsePaymentAmountToSats('0.00000001')).toBe(1n);
+ });
+
+ test('rejects more than 8 decimal places', () => {
+ expect(() => parsePaymentAmountToSats('0.123456789')).toThrow(
+ /more than 8 decimal/
+ );
+ });
+
+ test('rejects non-numeric strings', () => {
+ expect(() => parsePaymentAmountToSats('abc')).toThrow(/not a decimal/);
+ expect(() => parsePaymentAmountToSats('')).toThrow(/not a decimal/);
+ });
+
+ test('accepts BigInt directly', () => {
+ expect(parsePaymentAmountToSats(123n)).toBe(123n);
+ });
+
+ // Codex PR8 round 13 P2: JS Number.toString() uses scientific
+ // notation for magnitudes outside ~[1e-6, 1e21), so the numeric
+ // input branch must route through a decimal form before the
+ // regex check. Otherwise valid sats-scale amounts are rejected
+ // as `payment_amount is not a decimal number` even though the
+ // function documents support for numeric inputs.
+ test('numeric input that stringifies to scientific notation', () => {
+ // 1 sat as a JS number: (0.00000001).toString() === "1e-8"
+ expect(parsePaymentAmountToSats(0.00000001)).toBe(1n);
+ // 2 sats: (0.00000002).toString() === "2e-8"
+ expect(parsePaymentAmountToSats(0.00000002)).toBe(2n);
+ // A middling exponent magnitude that also defaults to exp form
+ // in some engines: 5e-7 === 50 sats
+ expect(parsePaymentAmountToSats(0.0000005)).toBe(50n);
+ // And plain decimals still work identically.
+ expect(parsePaymentAmountToSats(1.5)).toBe(150000000n);
+ });
+
+ test('numeric zero and negative zero normalize to 0n sats', () => {
+ expect(parsePaymentAmountToSats(0)).toBe(0n);
+ expect(parsePaymentAmountToSats(-0)).toBe(0n);
+ });
+
+ // Codex PR8 round 14 P1: numeric inputs with more than 8 decimal
+ // places must be REJECTED, matching the string path's behavior.
+ // Earlier code routed through `toFixed(8)` which silently rounded
+ // — `0.000000009` became 1 sat and `1.999999999` became 2 SYS,
+ // so the on-chain payment_amount could differ from what the
+ // client sent. Correct behavior: throw so the caller's /prepare
+ // or /drafts PATCH surfaces a `validation_failed` issue and the
+ // user re-enters a representable amount.
+ test('numeric over-precision is rejected, not silently rounded', () => {
+ expect(() => parsePaymentAmountToSats(0.000000009)).toThrow(
+ /more than 8 decimal/
+ );
+      // 10 decimals — mid-range, also scientific-notation territory
+ // on some engines. Must throw.
+ expect(() => parsePaymentAmountToSats(0.0000000099)).toThrow(
+ /more than 8 decimal/
+ );
+ // 10 decimals in the "middle" (no exponent form in toString),
+ // still must throw. 1.0000000001 = 1 + 1e-10; toString yields
+ // "1.0000000001".
+ expect(() => parsePaymentAmountToSats(1.0000000001)).toThrow(
+ /more than 8 decimal/
+ );
+ });
+
+ test('numeric values at exactly 8 decimals are preserved, not rounded', () => {
+ // 1 sat must remain 1 sat, not get rounded to a neighbor by a
+ // faulty normalization path.
+ expect(parsePaymentAmountToSats(0.00000001)).toBe(1n);
+ // 1.00000001 SYS = 100_000_001 sats. Precision at 8 decimals.
+    // 1.00000001 is not exactly representable in binary floating
+    // point, but the engine's shortest-round-trip toString still
+    // yields the 8-digit decimal form, so the accepted representation
+    // preserves all 8 digits without introducing a 9th.
+ // Note: (1.00000001).toString() === "1.00000001" on V8.
+ expect(parsePaymentAmountToSats(1.00000001)).toBe(100000001n);
+ });
+});
+
+// -----------------------------------------------------------------------
+// sanitizeName — Core's rule: lowercase, strip chars outside [-_a-z0-9]
+// -----------------------------------------------------------------------
+describe('sanitizeName', () => {
+ test('lowercases input', () => {
+ expect(sanitizeName('HELLO')).toBe('hello');
+ });
+
+ test('preserves dashes and underscores', () => {
+ expect(sanitizeName('my-proposal_1')).toBe('my-proposal_1');
+ });
+
+ test('strips spaces and symbols', () => {
+ expect(sanitizeName('My Test! @#$')).toBe('mytest');
+ });
+
+ test('returns empty string for garbage input', () => {
+ expect(sanitizeName('!!!')).toBe('');
+ expect(sanitizeName(null)).toBe('');
+ expect(sanitizeName(undefined)).toBe('');
+ expect(sanitizeName(42)).toBe('');
+ });
+});
+
+// -----------------------------------------------------------------------
+// normalizeInput — glue between user input and the canonical form
+// -----------------------------------------------------------------------
+describe('normalizeInput', () => {
+ test('accepts SYS decimal and converts to sats', () => {
+ const n = normalizeInput({ ...validInput });
+ expect(n.payment_amount_sats).toBe(4250000000n);
+ });
+
+ test('accepts payment_amount_sats directly (BigInt-safe)', () => {
+ const n = normalizeInput({
+ ...validInput,
+ payment_amount: undefined,
+ payment_amount_sats: '999999999999',
+ });
+ expect(n.payment_amount_sats).toBe(999999999999n);
+ });
+
+ test('sanitizes name', () => {
+ const n = normalizeInput({ ...validInput, name: 'Some Title!!' });
+ expect(n.name).toBe('sometitle');
+ });
+
+ test('trims strings', () => {
+ const n = normalizeInput({
+ ...validInput,
+ payment_address: ' sys1qabc ',
+ url: ' https://x.co ',
+ });
+ expect(n.payment_address).toBe('sys1qabc');
+ expect(n.url).toBe('https://x.co');
+ });
+
+ test('sets payment_amount_sats to null on unparsable input (so validator flags it)', () => {
+ const n = normalizeInput({ ...validInput, payment_amount: 'not a number' });
+ expect(n.payment_amount_sats).toBeNull();
+ });
+});
+
+// -----------------------------------------------------------------------
+// canonicalize / buildCanonicalJSON — determinism
+// -----------------------------------------------------------------------
+describe('canonicalize — determinism', () => {
+ test('fixed key order: type, name, start_epoch, end_epoch, address, amount, url', () => {
+ const { json } = canonicalize(validInput);
+ const keyOrder = json.match(/"(\w+)"\s*:/g).map((m) => m.match(/"(\w+)"/)[1]);
+ expect(keyOrder).toEqual([
+ 'type',
+ 'name',
+ 'start_epoch',
+ 'end_epoch',
+ 'payment_address',
+ 'payment_amount',
+ 'url',
+ ]);
+ });
+
+ test('two identical inputs produce byte-identical output', () => {
+ const a = canonicalize(validInput);
+ const b = canonicalize(validInput);
+ expect(a.json).toBe(b.json);
+ expect(a.dataHex).toBe(b.dataHex);
+ });
+
+ test('input key order does not affect output bytes', () => {
+ const reversed = {};
+ for (const k of Object.keys(validInput).reverse()) reversed[k] = validInput[k];
+ expect(canonicalize(reversed).json).toBe(canonicalize(validInput).json);
+ });
+
+ test('type is always 1', () => {
+ const { json } = canonicalize(validInput);
+ expect(json).toMatch(/"type":1,/);
+ });
+
+ test('byteLength matches UTF-8 byte count of the JSON', () => {
+ const c = canonicalize(validInput);
+ expect(c.byteLength).toBe(Buffer.byteLength(c.json, 'utf8'));
+ });
+
+ test('dataHex is a valid even-length lowercase hex string', () => {
+ const { dataHex } = canonicalize(validInput);
+ expect(dataHex).toMatch(/^[0-9a-f]+$/);
+ expect(dataHex.length % 2).toBe(0);
+ });
+
+ test('UTF-8 encoding: multi-byte chars in name are counted correctly', () => {
+ // Names are sanitized so only ASCII survives, but URLs can have
+ // unicode. Verify byte counting is UTF-8 aware.
+ const c = canonicalize({ ...validInput, url: 'https://example.org/\u00e9' });
+ // "é" is 2 bytes in UTF-8; the JSON string would contain the escape
+ // or the raw bytes. Either way byteLength should match Buffer.
+ expect(c.byteLength).toBe(Buffer.byteLength(c.json, 'utf8'));
+ });
+});
+
+describe('buildCanonicalJSON — number precision', () => {
+ test('payment_amount round-trips as decimal SYS, never scientific', () => {
+ const one_sat = canonicalize({ ...validInput, payment_amount: '0.00000001' }).json;
+ expect(one_sat).toContain('"payment_amount":0.00000001');
+ expect(one_sat).not.toContain('e-');
+ });
+
+ test('whole-SYS amounts have no decimal point', () => {
+ const hundred = canonicalize({ ...validInput, payment_amount: '100' }).json;
+ expect(hundred).toContain('"payment_amount":100');
+ });
+
+ test('epochs are emitted as integer literals (no decimal point)', () => {
+ const { json } = canonicalize({
+ ...validInput,
+ start_epoch: 1700000000,
+ end_epoch: 1800000000,
+ });
+ expect(json).toContain('"start_epoch":1700000000,');
+ expect(json).toContain('"end_epoch":1800000000,');
+ });
+});
+
+// -----------------------------------------------------------------------
+// validateStructural — Core's rules + our UX-stricter ones
+// -----------------------------------------------------------------------
+describe('validateStructural — happy path', () => {
+ test('valid input passes', () => {
+ const c = canonicalize(validInput);
+ expect(validateStructural(c).ok).toBe(true);
+ });
+
+ test('maximum-size valid payload passes', () => {
+ // Push name to 40 chars; url length within what still fits.
+ const c = canonicalize({
+ ...validInput,
+ name: 'x'.repeat(40),
+ });
+ expect(validateStructural(c).ok).toBe(true);
+ });
+});
+
+describe('validateStructural — name', () => {
+ test('empty name', () => {
+ const c = canonicalize({ ...validInput, name: '' });
+ const r = validateStructural(c);
+ expect(r.ok).toBe(false);
+ expect(r.issues.find((i) => i.code === 'name_required')).toBeTruthy();
+ });
+
+ test('name > 40 chars', () => {
+ // 40-char limit applies after sanitize; feed 50 alphanumerics.
+ const raw = 'a'.repeat(50);
+ const c = canonicalize({ ...validInput, name: raw });
+ const r = validateStructural(c);
+ expect(r.ok).toBe(false);
+ expect(r.issues.find((i) => i.code === 'name_too_long')).toBeTruthy();
+ });
+
+ test('name with invalid chars is auto-sanitized; empty after sanitize fails', () => {
+ const c = canonicalize({ ...validInput, name: '!!!' });
+ const r = validateStructural(c);
+ expect(r.ok).toBe(false);
+ expect(r.issues.find((i) => i.code === 'name_required')).toBeTruthy();
+ });
+});
+
+describe('validateStructural — epochs', () => {
+ test('missing epochs', () => {
+ const c = canonicalize({ ...validInput, start_epoch: null, end_epoch: null });
+ const r = validateStructural(c);
+ expect(r.ok).toBe(false);
+ expect(r.issues.some((i) => i.code === 'epoch_missing')).toBe(true);
+ });
+
+ test('end <= start', () => {
+ const c = canonicalize({
+ ...validInput,
+ start_epoch: 1800000000,
+ end_epoch: 1800000000,
+ });
+ const r = validateStructural(c);
+ expect(r.issues.find((i) => i.code === 'epoch_order')).toBeTruthy();
+ });
+
+ test('end in the past (when nowSeconds provided)', () => {
+ const c = canonicalize({
+ ...validInput,
+ start_epoch: 1700000000,
+ end_epoch: 1700001000,
+ });
+ const r = validateStructural(c, { nowSeconds: 1800000000 });
+ expect(r.issues.find((i) => i.code === 'epoch_past')).toBeTruthy();
+ });
+});
+
+describe('validateStructural — amount', () => {
+ test('zero', () => {
+ const c = canonicalize({ ...validInput, payment_amount: '0' });
+ const r = validateStructural(c);
+ expect(r.issues.find((i) => i.code === 'amount_not_positive')).toBeTruthy();
+ });
+
+ test('unparsable amount', () => {
+ const c = canonicalize({ ...validInput, payment_amount: 'xyz' });
+ const r = validateStructural(c);
+ expect(r.issues.find((i) => i.code === 'amount_not_positive')).toBeTruthy();
+ });
+
+ test('tiny positive amount is fine', () => {
+ const c = canonicalize({ ...validInput, payment_amount: '0.00000001' });
+ const r = validateStructural(c);
+ expect(r.ok).toBe(true);
+ });
+
+  // Codex PR8 round 15 P2: payment amounts above int64_max
+  // (2^63 - 1) would overflow the SQLite INTEGER column during
+ // `proposalSubmissions.create`, surfacing as a generic 500 at
+ // /gov/proposals/prepare instead of a deterministic 400. Assert
+ // structural validation now catches this before persistence with
+ // a dedicated `amount_too_large` code.
+ test('amount at int64_max is accepted (exactly representable)', () => {
+ // 2^63 - 1 sats — the largest value the DB column can hold.
+ // Business-nonsensical for SYS but MUST pass structural checks
+ // so we do not reject a valid-at-the-storage-level payload.
+ const c = canonicalize({
+ ...validInput,
+ payment_amount: '92233720368.54775807',
+ });
+ const r = validateStructural(c);
+ expect(r.ok).toBe(true);
+ });
+
+ test('amount above int64_max is rejected with amount_too_large', () => {
+ // 2^63 sats — one past int64_max; would wrap negative on write.
+ const c = canonicalize({
+ ...validInput,
+ payment_amount: '92233720368.54775808',
+ });
+ const r = validateStructural(c);
+ expect(r.ok).toBe(false);
+ const codes = new Set(r.issues.map((i) => i.code));
+ expect(codes.has('amount_too_large')).toBe(true);
+ // Must NOT also fire amount_not_positive (the two failure
+ // modes are mutually exclusive by design so the frontend can
+ // map each to distinct copy).
+ expect(codes.has('amount_not_positive')).toBe(false);
+ });
+
+ test('enormous amount well past int64_max is still a clean validation failure', () => {
+ // A user-typed absurd value — the kind of thing the reviewer
+    // called out (9223372036854775808 sats, ~9.22e18). Parsing
+ // succeeds (BigInt is unbounded); structural gate catches it.
+ const c = canonicalize({
+ ...validInput,
+ // 10^20 sats = 10^12 SYS. JSON-safe, parses fine, overflows
+ // SQLite on write without the new gate.
+ payment_amount: '1000000000000',
+ });
+ const r = validateStructural(c);
+ expect(r.ok).toBe(false);
+ expect(
+ r.issues.find((i) => i.code === 'amount_too_large')
+ ).toBeTruthy();
+ });
+});
+
+describe('validateStructural — address', () => {
+ test('empty', () => {
+ const c = canonicalize({ ...validInput, payment_address: '' });
+ const r = validateStructural(c);
+ expect(r.issues.find((i) => i.code === 'address_required')).toBeTruthy();
+ });
+
+ test('contains whitespace', () => {
+ const c = canonicalize({
+ ...validInput,
+ payment_address: 'sys1q xyz abc',
+ });
+ const r = validateStructural(c);
+ expect(
+ r.issues.find(
+ (i) => i.code === 'address_whitespace' || i.code === 'address_invalid'
+ )
+ ).toBeTruthy();
+ });
+
+ test('fails sanity regex (too short)', () => {
+ const c = canonicalize({ ...validInput, payment_address: 'abc' });
+ const r = validateStructural(c);
+ expect(r.issues.find((i) => i.code === 'address_invalid')).toBeTruthy();
+ });
+});
+
+describe('validateStructural — url', () => {
+ test('empty', () => {
+ const c = canonicalize({ ...validInput, url: '' });
+ const r = validateStructural(c);
+ expect(r.issues.find((i) => i.code === 'url_required')).toBeTruthy();
+ });
+
+ test('too short', () => {
+ const c = canonicalize({ ...validInput, url: 'a' });
+ const r = validateStructural(c);
+ expect(
+ r.issues.find((i) => i.code === 'url_too_short' || i.code === 'url_scheme')
+ ).toBeTruthy();
+ });
+
+ test('missing scheme', () => {
+ const c = canonicalize({ ...validInput, url: 'example.org' });
+ const r = validateStructural(c);
+ expect(r.issues.find((i) => i.code === 'url_scheme')).toBeTruthy();
+ });
+
+ test('javascript: URL rejected by our stricter rule', () => {
+ const c = canonicalize({ ...validInput, url: 'javascript:alert(1)' });
+ const r = validateStructural(c);
+ expect(r.issues.find((i) => i.code === 'url_scheme')).toBeTruthy();
+ });
+
+ test('whitespace in url', () => {
+ const c = canonicalize({ ...validInput, url: 'https://ex ample.com' });
+ const r = validateStructural(c);
+ expect(
+ r.issues.find((i) => i.code === 'url_whitespace' || i.code === 'url_scheme')
+ ).toBeTruthy();
+ });
+});
+
+describe('validateStructural — payload size', () => {
+ test('payload over 512 bytes rejected', () => {
+    // A ~500-char URL pushes the canonical JSON past 512 bytes on
+    // its own, regardless of how small the other fields are.
+ const longUrl = 'https://example.org/' + 'x'.repeat(500);
+ const c = canonicalize({ ...validInput, url: longUrl });
+ expect(c.byteLength).toBeGreaterThan(MAX_DATA_SIZE);
+ const r = validateStructural(c);
+ expect(r.issues.find((i) => i.code === 'payload_too_large')).toBeTruthy();
+ });
+});
+
+describe('validateStructural — multiple issues surfaced together', () => {
+ test('returns all issues in one call', () => {
+ const c = canonicalize({
+ ...validInput,
+ name: '',
+ url: '',
+ payment_amount: '0',
+ });
+ const r = validateStructural(c);
+ expect(r.ok).toBe(false);
+ const codes = new Set(r.issues.map((i) => i.code));
+ expect(codes.has('name_required')).toBe(true);
+ expect(codes.has('url_required')).toBe(true);
+ expect(codes.has('amount_not_positive')).toBe(true);
+ });
+});
+
+// -----------------------------------------------------------------------
+// parseCoreRejectMessage — translate Core's error strings
+// -----------------------------------------------------------------------
+describe('parseCoreRejectMessage', () => {
+ test('maps multiple concatenated reasons', () => {
+ const out = parseCoreRejectMessage(
+ 'Invalid name;name exceeds 40 characters;Invalid URL;url too short;'
+ );
+ const codes = out.map((i) => i.code);
+ expect(codes).toContain('name_too_long');
+ expect(codes).toContain('url_invalid');
+ });
+
+ test('maps script-address rejection', () => {
+ const out = parseCoreRejectMessage('script addresses are not supported;');
+ expect(out[0].code).toBe('address_script_type');
+ });
+
+ test('maps data-exceeds-max-size', () => {
+ const out = parseCoreRejectMessage('data exceeds 512 characters;');
+ expect(out[0].code).toBe('payload_too_large');
+ });
+
+ test('falls back to raw message when nothing matches', () => {
+ const out = parseCoreRejectMessage('something weird we did not anticipate');
+ expect(out[0].code).toBe('core_rejected');
+ expect(out[0].message).toMatch(/something weird/);
+ });
+
+ test('returns [] for empty/null input', () => {
+ expect(parseCoreRejectMessage('')).toEqual([]);
+ expect(parseCoreRejectMessage(null)).toEqual([]);
+ });
+});
diff --git a/routes/govProposals.js b/routes/govProposals.js
new file mode 100644
index 0000000..6d8c859
--- /dev/null
+++ b/routes/govProposals.js
@@ -0,0 +1,1157 @@
+'use strict';
+
+// /gov/proposals — HTTP surface for the governance proposal wizard.
+//
+// Shape of the feature (front-to-back):
+//
+// 1. User opens the wizard.
+// 2. User drafts content. `POST /drafts` / `PATCH /drafts/:id` persist
+// whatever they typed (title, description, name, url, amount, ...).
+// Drafts are server-side so the same user on another device picks
+// up where they left off — no banners, the drafts list on the
+// Governance page is enough.
+// 3. When the user clicks "Continue → Review", the client sends the
+// full payload to `POST /prepare`. We canonicalize it, compute the
+// `proposal_hash` that the collateral OP_RETURN must commit to,
+// call `gObject_check` on the node as a pre-flight, and persist a
+// `proposal_submissions` row in `prepared` state. The response
+// gives the client everything it needs to build the 150 SYS
+// collateral PSBT (hash, amount, parent/time/revision).
+// 4. The user pays the 150 SYS fee — either through Pali
+// (`sys_signAndSend` on a client-built PSBT) or manually via the
+// Syscoin-Qt console. Either path ends with the user's wallet
+// holding a txid.
+// 5. The client calls `POST /submissions/:id/attach-collateral` with
+// that txid. We flip the row to `awaiting_collateral` and the
+// dispatcher takes over: it polls `getRawTransaction`, waits for
+// 6 confirmations, then calls `gObject_submit`. On success the
+// row becomes `submitted` with the governance hash recorded.
+// 6. The status page polls `GET /submissions/:id` during the wait;
+// the user sees a live confs counter and an ETA.
+//
+// Injectables (factory args):
+//
+// - drafts : proposal_drafts repo
+// - submissions : proposal_submissions repo
+// - sessionMw : auth middleware (exposes req.user)
+// - csrfMw : CSRF protection
+// - rpc : object with async methods:
+// - gObjectCheck(dataHex) → any
+// (getRawTransaction / gObjectSubmit live on
+// the dispatcher's rpc object, not here — the
+// route layer only needs pre-flight check.)
+// gObjectCheck is optional; when absent we
+// skip the RPC pre-flight and fall back to
+// structural validation only.
+// - runAtomic : db.transaction wrapper (from appFactory) —
+// used to atomically "create submission AND
+// delete consumed draft" so a crash between
+// the two can't leave an orphan draft.
+// - now : injectable clock (ms).
+// - maxDraftsPerUser : soft cap (default 50). Prevents a single
+// account from squatting on hundreds of drafts.
+// - maxPaymentCount : soft cap on the "how many monthly payments"
+// display field (default 60 = five years).
+// Core has NO bound on payment_count because
+// the field isn't on-chain; we sanity-check it
+// here so the wizard can't send "1,000,000".
+//
+// Error response contract (kept stable for the frontend):
+//
+// 400 { error: 'validation_failed', issues: [{ field, code, message }] }
+// 400 { error: 'bad_request', detail?: string }
+// 401 { error: 'unauthorized' }
+// 403 { error: 'csrf_missing' | 'csrf_invalid' }
+// 404 { error: 'not_found' }
+// 409 { error: 'conflict', reason: }
+// 422 { error: 'core_rejected', issues: [{ field, code, message }] }
+// 500 { error: 'internal' }
+
+const express = require('express');
+
+const proposalValidate = require('../lib/proposalValidate');
+const { computeProposalHash } = require('../lib/proposalHash');
+
+const HEX64 = /^[0-9a-f]{64}$/i;
+
+// 150 SYS, hardcoded in Core's src/governance/governanceobject.h as
+// GOVERNANCE_PROPOSAL_FEE_TX. BigInt so downstream JSON serializers
+// never accidentally lose precision.
+const COLLATERAL_FEE_SATS = 15000000000n;
+
+// Kept in lock-step with lib/proposalDispatcher.REQUIRED_CONFS so the
+// status page can render "x of 6 confs" without an extra round-trip
+// just to learn the threshold.
+const REQUIRED_CONFIRMATIONS = 6;
+
+const DEFAULT_MAX_DRAFTS_PER_USER = 50;
+const DEFAULT_MAX_PAYMENT_COUNT = 60;
+
+// ---------------------------------------------------------------------
+// Small helpers kept local — they aren't reusable across other routes
+// and hoisting them out would just add indirection.
+// ---------------------------------------------------------------------
+
+// Accept a body shape with either camelCase (what the wizard sends)
+// or snake_case (what the canonicalizer expects). Normalize to the
+// snake_case form `proposalValidate` wants. We keep this tolerant on
+// purpose: the wizard is JSON-shape-strict, but HTTP clients that
+// humans might build in the future (e.g. a CLI) will naturally reach
+// for snake_case — this layer hides the difference.
+function readProposalFields(body) {
+ const b = body || {};
+ const pick = (camel, snake) => (b[camel] !== undefined ? b[camel] : b[snake]);
+ return {
+ title: pick('title', 'title'),
+ description: pick('description', 'description'),
+ name: pick('name', 'name'),
+ url: pick('url', 'url'),
+ paymentAddress: pick('paymentAddress', 'payment_address'),
+ paymentAmountSats: pick('paymentAmountSats', 'payment_amount_sats'),
+ paymentAmount: pick('paymentAmount', 'payment_amount'),
+ paymentCount: pick('paymentCount', 'payment_count'),
+ startEpoch: pick('startEpoch', 'start_epoch'),
+ endEpoch: pick('endEpoch', 'end_epoch'),
+ };
+}
+
+// Shape + safety validation for `payment_amount_sats` taken from the
+// wire. Returns `{ ok: true, value: BigInt }` on success, or
+// `{ ok: false, code, message }` with a stable machine-key code that
+// callers wrap into their own `validation_failed` envelope.
+//
+// Accepted shapes:
+// - BigInt: must be >= 0n. Forwarded verbatim.
+// - string: must match `/^(0|[1-9][0-9]*)$/` (digit-only, no leading
+// zeros except "0", no decimal/sign/whitespace/exponent). This is
+// the recommended wire form for large amounts — strings carry
+// arbitrary precision, unlike JSON numbers.
+// - number: must be a SAFE integer (`Number.isSafeInteger`) and
+// >= 0. Codex PR8 round 17 P2: JS JSON.parse silently rounds
+// integers above `Number.MAX_SAFE_INTEGER (2^53 - 1)` at parse
+// time. If we accepted `Number.isInteger` alone and then
+// `BigInt(n)`'d, a caller that sent `9007199254740993` would
+// see us persist `9007199254740992` (or some other nearby
+// double-representable value) — a silent mismatch between the
+// bytes the user signed and what the server canonicalizes /
+// hashes. Forcing safe-integer input means callers MUST send
+// large amounts as digit strings, where `BigInt` parses them
+// without loss. Anything above int64 is still caught by the
+// MAX_PAYMENT_AMOUNT_SATS gate downstream, but that gate runs
+// AFTER the lossy number parse, so the safe-integer check has
+// to happen here — not there.
+function parsePaymentAmountSatsInput(sats) {
+ if (typeof sats === 'bigint') {
+ if (sats < 0n) {
+ return {
+ ok: false,
+ code: 'amount_sats_invalid',
+ message: 'payment_amount_sats must be non-negative.',
+ };
+ }
+ return { ok: true, value: sats };
+ }
+ if (typeof sats === 'number') {
+ if (!Number.isSafeInteger(sats)) {
+ // Covers: non-finite, non-integer, and integers above 2^53-1
+ // that JSON.parse already rounded. Clients wanting amounts at
+ // or above Number.MAX_SAFE_INTEGER must send a digit string.
+ return {
+ ok: false,
+ code: 'amount_sats_unsafe_number',
+ message:
+ 'payment_amount_sats as a JSON number must be a safe integer (|value| < 2^53). Use a digit-only string for larger amounts.',
+ };
+ }
+ if (sats < 0) {
+ return {
+ ok: false,
+ code: 'amount_sats_invalid',
+ message: 'payment_amount_sats must be non-negative.',
+ };
+ }
+ return { ok: true, value: BigInt(sats) };
+ }
+ if (typeof sats === 'string') {
+ if (!/^(0|[1-9][0-9]*)$/.test(sats)) {
+ return {
+ ok: false,
+ code: 'amount_sats_invalid',
+ message:
+ 'payment_amount_sats must be a non-negative integer (digit-only string, number, or bigint).',
+ };
+ }
+ return { ok: true, value: BigInt(sats) };
+ }
+ return {
+ ok: false,
+ code: 'amount_sats_invalid',
+ message:
+ 'payment_amount_sats must be a non-negative integer (digit-only string, number, or bigint).',
+ };
+}
+
+// Client-side payload shape for a draft. Accepts strings/numbers from
+// JSON and normalizes to what proposalDrafts.create/update expect.
+// BigInt payment amounts come in as either a digit-string or a SYS
+// decimal; we normalize to satoshis (BigInt). payment_count is passed
+// through sanity bounds so it can't be NaN, negative, or absurd.
+function normalizeDraftPatch(body, maxPaymentCount) {
+ const f = readProposalFields(body);
+ const patch = {};
+ if (f.title !== undefined) patch.title = String(f.title ?? '');
+ if (f.description !== undefined) patch.description = String(f.description ?? '');
+ if (f.name !== undefined) patch.name = String(f.name ?? '');
+ if (f.url !== undefined) patch.url = String(f.url ?? '');
+ if (f.paymentAddress !== undefined) {
+ patch.payment_address = String(f.paymentAddress ?? '');
+ }
+
+ if (f.paymentAmountSats !== undefined) {
+ // Codex PR8 round 5 P2: previously this path forwarded the raw
+ // client value straight to the repo, which threw on malformed
+ // input (`"12.5"`, `"-1"`, `"abc"`, objects, …). The route
+ // catch-all rendered those throws as generic `500 internal`,
+ // which looks like a server bug to the client for what is
+ // actually a request-shape problem. Validate here so we surface
+ // the same `400 validation_failed` shape as the `paymentAmount`
+ // branch below.
+ const parsed = parsePaymentAmountSatsInput(f.paymentAmountSats);
+ if (!parsed.ok) {
+ const err = new Error('payment_amount_sats invalid');
+ err.status = 400;
+ err.body = {
+ error: 'validation_failed',
+ issues: [
+ {
+ field: 'payment_amount_sats',
+ code: parsed.code,
+ message: parsed.message,
+ },
+ ],
+ };
+ throw err;
+ }
+ // `parsePaymentAmountSatsInput` normalizes to BigInt so the
+ // int64 gate below has a single type to compare against.
+ patch.payment_amount_sats = parsed.value;
+ } else if (f.paymentAmount !== undefined) {
+ // Decimal SYS value — convert to sats up front so the draft row
+ // matches the submission row's unit.
+ try {
+ patch.payment_amount_sats =
+ proposalValidate.parsePaymentAmountToSats(f.paymentAmount);
+ } catch (e) {
+ const err = new Error('payment_amount invalid');
+ err.status = 400;
+ err.body = {
+ error: 'validation_failed',
+ issues: [
+ {
+ field: 'payment_amount',
+ code: 'amount_invalid',
+ message: e.message,
+ },
+ ],
+ };
+ throw err;
+ }
+ }
+
+ // Codex PR8 round 16 P2: enforce the SQLite INTEGER (int64) ceiling
+ // on any accepted payment_amount_sats BEFORE the draft hits the
+ // DB layer. The structural validator applies the same gate for
+ // submissions (see proposalValidate.MAX_PAYMENT_AMOUNT_SATS), but
+ // draft validation is intentionally looser — it only checked the
+ // digit-shape regex / BigInt >= 0 and forwarded arbitrarily large
+ // values straight to `proposal_drafts.payment_amount_sats`, where
+ // an int64 overflow surfaced as a generic 500 instead of a
+ // deterministic 400. Normalize all three accepted shapes (BigInt
+ // / Number / digit-string) to BigInt for the comparison, and
+ // persist the normalized BigInt so the drafts repo never has to
+ // re-parse a string. The `paymentAmount` branch above already
+ // emits a BigInt from `parsePaymentAmountToSats`, so it flows
+ // through this gate automatically.
+ if (patch.payment_amount_sats !== undefined) {
+ const asBig =
+ typeof patch.payment_amount_sats === 'bigint'
+ ? patch.payment_amount_sats
+ : BigInt(patch.payment_amount_sats);
+ if (asBig > proposalValidate.MAX_PAYMENT_AMOUNT_SATS) {
+ const err = new Error('payment_amount_sats exceeds maximum');
+ err.status = 400;
+ err.body = {
+ error: 'validation_failed',
+ issues: [
+ {
+ field: 'payment_amount_sats',
+ code: 'amount_too_large',
+ message: 'Payment amount exceeds the maximum supported value.',
+ },
+ ],
+ };
+ throw err;
+ }
+ patch.payment_amount_sats = asBig;
+ }
+
+ if (f.paymentCount !== undefined) {
+ const n = Math.trunc(Number(f.paymentCount));
+ if (!Number.isFinite(n) || n < 1 || n > maxPaymentCount) {
+ const err = new Error('payment_count out of range');
+ err.status = 400;
+ err.body = {
+ error: 'validation_failed',
+ issues: [
+ {
+ field: 'payment_count',
+ code: 'payment_count_range',
+ message: `payment_count must be between 1 and ${maxPaymentCount}.`,
+ },
+ ],
+ };
+ throw err;
+ }
+ patch.payment_count = n;
+ }
+
+ if (f.startEpoch !== undefined) {
+ patch.start_epoch = f.startEpoch === null ? null : Math.trunc(Number(f.startEpoch));
+ }
+ if (f.endEpoch !== undefined) {
+ patch.end_epoch = f.endEpoch === null ? null : Math.trunc(Number(f.endEpoch));
+ }
+ return patch;
+}
+
// Drafts and submissions hold BigInts for payment_amount_sats; JSON
// can't serialize them. Mapper returns the JSON-safe shape the wizard
// will consume (stringified bigints, ISO-ish timestamps left as ms
// because the UI uses `new Date(ms)` directly).
//
// Draft rows are intentionally partial: POST /drafts accepts an empty
// body and `normalizeDraftPatch` only sets keys the caller supplied,
// so `paymentAmountSats` may legitimately be null/undefined on a fresh
// draft. The previous unconditional `.toString()` threw a TypeError in
// that case, surfacing as a 500 on every draft read until the user
// filled in an amount. Serialize absent amounts as null instead.
function jsonDraft(row) {
  if (!row) return null;
  return {
    id: row.id,
    userId: row.userId,
    title: row.title,
    name: row.name,
    url: row.url,
    description: row.description,
    paymentAddress: row.paymentAddress,
    // BigInt -> string for JSON; null-safe because drafts are partial.
    paymentAmountSats:
      row.paymentAmountSats == null ? null : row.paymentAmountSats.toString(),
    paymentCount: row.paymentCount,
    startEpoch: row.startEpoch,
    endEpoch: row.endEpoch,
    createdAt: row.createdAt,
    updatedAt: row.updatedAt,
  };
}
+
// Map a submissions-repo row to the wire shape the wizard consumes.
// The BigInt payment_amount_sats is stringified (JSON cannot encode
// BigInt); timestamps stay as epoch-ms numbers because the UI feeds
// them straight into `new Date(ms)`. Key order matches the historical
// response shape so serialized output is byte-stable for clients that
// diff JSON bodies.
function jsonSubmission(row) {
  if (!row) return null;
  const wire = {};
  // Identity + frozen hashing fields, copied through unchanged.
  for (const key of [
    'id', 'userId', 'draftId',
    'parentHash', 'revision', 'timeUnix', 'dataHex', 'proposalHash',
    'title', 'name', 'url', 'paymentAddress',
  ]) {
    wire[key] = row[key];
  }
  // The one non-passthrough field: BigInt -> decimal string.
  wire.paymentAmountSats = row.paymentAmountSats.toString();
  // Schedule, lifecycle state, and audit timestamps.
  for (const key of [
    'paymentCount', 'startEpoch', 'endEpoch',
    'status', 'collateralTxid', 'collateralConfs',
    'governanceHash', 'failReason', 'failDetail',
    'createdAt', 'updatedAt',
  ]) {
    wire[key] = row[key];
  }
  return wire;
}
+
// Validate an :id route parameter. Submission / draft ids are SQLite
// integer rowids, always strictly positive. Anything else maps to
// null so callers answer 404 (not 400) — a scanner probing
// /submissions/foo can't distinguish "doesn't exist" from
// "malformed id".
function parseIntId(raw) {
  if (raw === null || raw === undefined) return null;
  const parsed = Number(raw);
  const isPositiveInt = Number.isInteger(parsed) && parsed > 0;
  // Round-trip check rejects non-canonical spellings ("007", " 7",
  // "1e3") and anything Number() silently rounded.
  return isPositiveInt && String(parsed) === String(raw) ? parsed : null;
}
+
// ---------------------------------------------------------------------
// Factory.
// ---------------------------------------------------------------------

/**
 * Build the authenticated /gov/proposals router: drafts CRUD, the
 * /prepare canonicalize→hash→persist step, and the submission
 * lifecycle endpoints (attach-collateral, delete). All persistence
 * goes through the injected repos so the router stays storage-agnostic
 * and unit-testable.
 *
 * @param {object}   deps
 * @param {object}   deps.drafts            proposal-drafts repo (create / countForUser / listForUser / getByIdForUser / update / remove)
 * @param {object}   deps.submissions       proposal-submissions repo
 * @param {object}   deps.sessionMw         must expose a `requireAuth` middleware
 * @param {object}   deps.csrfMw            must expose a `require` middleware
 * @param {object}   [deps.rpc]             optional Core adapter; may be null, in which case the gObjectCheck preflight is skipped
 * @param {Function} deps.runAtomic         runs a callback inside a single DB transaction
 * @param {Function} [deps.now]             ms-epoch clock, injectable for tests
 * @param {number}   [deps.maxDraftsPerUser]  per-user draft cap (409 draft_limit past it)
 * @param {number}   [deps.maxPaymentCount]   UX guardrail on payment_count
 * @returns {object} an express.Router with auth + CSRF applied to every route
 * @throws {Error} at construction time when a required dependency is missing
 */
function createGovProposalsRouter({
  drafts,
  submissions,
  sessionMw,
  csrfMw,
  rpc = {},
  runAtomic,
  now = () => Date.now(),
  maxDraftsPerUser = DEFAULT_MAX_DRAFTS_PER_USER,
  maxPaymentCount = DEFAULT_MAX_PAYMENT_COUNT,
} = {}) {
  // Fail fast on wiring errors: these throws happen at boot, not at
  // request time, so a misconfigured deployment can't serve traffic.
  if (!drafts || typeof drafts.create !== 'function') {
    throw new Error('createGovProposalsRouter: drafts repo is required');
  }
  if (!submissions || typeof submissions.create !== 'function') {
    throw new Error('createGovProposalsRouter: submissions repo is required');
  }
  if (!sessionMw || typeof sessionMw.requireAuth !== 'function') {
    throw new Error('createGovProposalsRouter: sessionMw is required');
  }
  if (!csrfMw || typeof csrfMw.require !== 'function') {
    throw new Error('createGovProposalsRouter: csrfMw is required');
  }
  if (typeof runAtomic !== 'function') {
    throw new Error('createGovProposalsRouter: runAtomic is required');
  }

  const router = express.Router();

  // Everything below requires auth + CSRF. We apply both at the router
  // level for brevity — matches the pattern used by /vault.
  router.use(sessionMw.requireAuth, csrfMw.require);

  // -----------------------------------------------------------------
  // Drafts
  // -----------------------------------------------------------------

  // POST /gov/proposals/drafts
  //
  // Input: partial draft body (all fields optional).
  // Output: 201 { draft }
  // Errors: 409 draft_limit when the user is already at maxDraftsPerUser.
  router.post('/drafts', (req, res) => {
    const userId = req.user.id;
    try {
      // NOTE(review): countForUser + create is not atomic here, so two
      // concurrent POSTs could both pass the cap check — confirm the
      // limit is best-effort rather than a hard invariant.
      const count = drafts.countForUser(userId);
      if (count >= maxDraftsPerUser) {
        return res
          .status(409)
          .json({ error: 'conflict', reason: 'draft_limit' });
      }
      const patch = normalizeDraftPatch(req.body, maxPaymentCount);
      const created = drafts.create(userId, patch);
      return res.status(201).json({ draft: jsonDraft(created) });
    } catch (err) {
      // normalizeDraftPatch throws with { status, body } for 400s;
      // anything else is an unexpected repo failure -> JSON 500.
      if (err.status && err.body) {
        return res.status(err.status).json(err.body);
      }
      // eslint-disable-next-line no-console
      console.error('[POST /gov/proposals/drafts]', err);
      return res.status(500).json({ error: 'internal' });
    }
  });

  // GET /gov/proposals/drafts
  //
  // Output: { drafts: [...], total }
  //
  // NOTE(review): no try/catch — this sync handler relies on Express
  // catching synchronous repo throws (default 500, HTML not JSON);
  // confirm that's acceptable for read paths.
  router.get('/drafts', (req, res) => {
    const userId = req.user.id;
    const list = drafts.listForUser(userId);
    return res.json({ drafts: list.map(jsonDraft), total: list.length });
  });

  // GET /gov/proposals/drafts/:id
  //
  // 404 for malformed ids as well as missing rows (see parseIntId).
  router.get('/drafts/:id', (req, res) => {
    const userId = req.user.id;
    const id = parseIntId(req.params.id);
    if (!id) return res.status(404).json({ error: 'not_found' });
    const draft = drafts.getByIdForUser(id, userId);
    if (!draft) return res.status(404).json({ error: 'not_found' });
    return res.json({ draft: jsonDraft(draft) });
  });

  // PATCH /gov/proposals/drafts/:id
  //
  // Partial update; same validation (and same 400 shapes) as create.
  router.patch('/drafts/:id', (req, res) => {
    const userId = req.user.id;
    const id = parseIntId(req.params.id);
    if (!id) return res.status(404).json({ error: 'not_found' });
    try {
      const patch = normalizeDraftPatch(req.body, maxPaymentCount);
      const updated = drafts.update(id, userId, patch);
      if (!updated) return res.status(404).json({ error: 'not_found' });
      return res.json({ draft: jsonDraft(updated) });
    } catch (err) {
      if (err.status && err.body) {
        return res.status(err.status).json(err.body);
      }
      // eslint-disable-next-line no-console
      console.error('[PATCH /gov/proposals/drafts]', err);
      return res.status(500).json({ error: 'internal' });
    }
  });

  // DELETE /gov/proposals/drafts/:id
  //
  // 204 on success; 404 when the row is missing or owned by someone
  // else (the repo's remove is scoped to (id, userId)).
  router.delete('/drafts/:id', (req, res) => {
    const userId = req.user.id;
    const id = parseIntId(req.params.id);
    if (!id) return res.status(404).json({ error: 'not_found' });
    const changes = drafts.remove(id, userId);
    if (changes === 0) return res.status(404).json({ error: 'not_found' });
    return res.status(204).end();
  });

  // -----------------------------------------------------------------
  // POST /gov/proposals/prepare
  //
  // Input: full proposal fields + optional `draftId` + optional
  // `consumeDraft` bool (default true when draftId present).
  // Output: 201 {
  //   submission,        // full jsonSubmission()
  //   opReturnHex,       // 64-char hex (OP_RETURN push)
  //   canonicalJson,     // exact bytes Core will hash
  //   payloadBytes,      // length of canonicalJson
  //   collateralFeeSats: "15000000000",
  //   requiredConfirmations: 6,
  // }
  //
  // Semantics:
  //   1. Canonicalize + structurally validate. Field errors -> 400.
  //   2. Compute hash (deterministic, no RPC).
  //   3. If rpc.gObjectCheck is provided, call it. Core rejects -> 422.
  //   4. Persist submission in 'prepared' state. If draftId belongs
  //      to the user and consumeDraft is truthy, delete it in the
  //      same atomic block so a crash can't orphan it.
  //
  // Idempotency: if the same (userId, proposal_hash) submission already
  // exists AND is still in 'prepared' state, return it as-is instead
  // of creating a duplicate. This matters because the wizard may re-
  // call /prepare on refresh; we don't want to strand half-complete
  // rows every time.
  // -----------------------------------------------------------------
  router.post('/prepare', async (req, res) => {
    const userId = req.user.id;
    const body = req.body || {};
    const f = readProposalFields(body);

    // Merge canonical inputs. We take paymentAmount either as sats
    // (preferred) or as SYS-decimal string (wizard convenience).
    const rawForCanon = {
      name: f.name ?? '',
      url: f.url ?? '',
      payment_address: f.paymentAddress ?? '',
      start_epoch: f.startEpoch,
      end_epoch: f.endEpoch,
    };
    if (f.paymentAmountSats !== undefined) {
      // Codex PR8 round 17 P2: earlier this path used a bare
      // `BigInt(f.paymentAmountSats)` inside a try/catch. That
      // accepted raw JS numbers above `Number.MAX_SAFE_INTEGER`,
      // which `JSON.parse` has already rounded *before* we see
      // them. `BigInt(9007199254740993)` (what the caller typed)
      // never happens — what we actually hand to `BigInt` is the
      // already-rounded double, e.g. `9007199254740992`. We then
      // canonicalize + hash + persist THAT value, so the stored
      // submission encodes a different payment amount than the
      // caller sent, silently. Route through the shared
      // `parsePaymentAmountSatsInput` helper which enforces
      // `Number.isSafeInteger` for numeric input; clients that
      // legitimately need larger amounts must send a digit string
      // (which `BigInt` parses losslessly).
      const parsed = parsePaymentAmountSatsInput(f.paymentAmountSats);
      if (!parsed.ok) {
        return res.status(400).json({
          error: 'validation_failed',
          issues: [
            {
              field: 'payment_amount_sats',
              code: parsed.code,
              message: parsed.message,
            },
          ],
        });
      }
      rawForCanon.payment_amount_sats = parsed.value;
    } else if (f.paymentAmount !== undefined) {
      rawForCanon.payment_amount = f.paymentAmount;
    }

    // Canonicalize & structurally validate.
    const canon = proposalValidate.canonicalize(rawForCanon);
    const nowSeconds = Math.floor(now() / 1000);
    const structural = proposalValidate.validateStructural(canon, {
      nowSeconds,
    });
    if (!structural.ok) {
      return res.status(400).json({
        error: 'validation_failed',
        issues: structural.issues,
      });
    }

    // payment_count: Core has no bound, we enforce a UX guardrail.
    let paymentCount = 1;
    if (f.paymentCount !== undefined) {
      const n = Math.trunc(Number(f.paymentCount));
      if (!Number.isFinite(n) || n < 1 || n > maxPaymentCount) {
        return res.status(400).json({
          error: 'validation_failed',
          issues: [
            {
              field: 'payment_count',
              code: 'payment_count_range',
              message: `payment_count must be between 1 and ${maxPaymentCount}.`,
            },
          ],
        });
      }
      paymentCount = n;
    }

    // Hashing fields are frozen at prepare time. parent_hash and
    // revision are fixed ('0'/1) for user-submitted top-level
    // proposals; time defaults to our clock so a stale client can't
    // backdate a submission to avoid the "expiration" check in Core.
    //
    // Codex PR8 round 2 P1: idempotency must key on the time-free
    // canonical payload (dataHex), NOT proposalHash, because
    // proposalHash bakes in `time`. Two retries of the same logical
    // /prepare across a one-second boundary would otherwise produce
    // different hashes and both land in the DB. If we already have a
    // `prepared` row for this user with the same dataHex, replay its
    // frozen fields and skip the insert entirely — this also skips
    // the RPC pre-flight, which is both redundant (Core already
    // accepted it once) and subject to rate-limiting on retries.
    const parentHash = '0';
    const revision = 1;

    // Codex PR8 round 8 P1: this async route had several persistence
    // calls outside any try/catch, so a synchronous DB throw from
    // better-sqlite3 (SQLITE_BUSY / I/O error / corrupt-index, etc.)
    // became an unhandled promise rejection rather than a controlled
    // JSON 500. Express 4 does NOT catch async handler rejections —
    // in production those surface as hung requests and/or process-
    // level instability under transient faults. Wrap everything
    // from this point on in a top-level try/catch. The existing
    // inner try/catch blocks (gObjectCheck soft-fail, rehash,
    // hash-computation, persist-race) all do early `return
    // res.status(...)`, so they still short-circuit before reaching
    // this outer catch — the outer catch only fires for truly
    // unexpected DB / RPC / compute failures.
    let existingByPayload;
    try {
      existingByPayload = submissions.findPreparedByDataHexForUser(
        userId,
        canon.dataHex
      );
    } catch (err) {
      // eslint-disable-next-line no-console
      console.error(
        '[POST /gov/proposals/prepare] findPreparedByDataHexForUser failed',
        err
      );
      return res.status(500).json({ error: 'internal' });
    }

    // Determine the hashing time. On the idempotent replay path we
    // MUST reuse the frozen `timeUnix` from the existing row — any
    // other value would change the proposal hash we commit to via
    // OP_RETURN, and the client already has the original envelope.
    // On the fresh path, use our server clock (`nowSeconds`) rather
    // than trust client input, so a stale client can't backdate the
    // submission past Core's expiration check.
    const timeUnix = existingByPayload
      ? existingByPayload.timeUnix
      : nowSeconds;

    // Preflight Core BEFORE branching on idempotency. Previously the
    // idempotent short-circuit returned early without re-running the
    // check, so if the original /prepare created the row during a
    // transient RPC outage (the `catch` block below soft-allows net
    // errors), every retry would replay the cached row and never
    // revalidate once Core recovered. A Core-invalid proposal could
    // then proceed to collateral payment and fail only in the
    // dispatcher — after the 150 SYS fee is already burned.
    // Running the preflight first ensures every /prepare response is
    // backed by a fresh Core ack (or an explicit soft-fail we logged).
    // (Codex PR8 round 5 P1.)
    //
    // `rpc` is advertised as optional and appFactory.js explicitly
    // passes `null` when no Core connection is wired (a valid default
    // deployment). Destructured defaults (`rpc = {}`) only fire for
    // `undefined`, so an explicit null would flow through and the
    // bare `typeof rpc.gObjectCheck` dereference would throw
    // TypeError inside this async handler, surfacing as an unhandled
    // rejection instead of a clean "skip preflight".
    if (rpc && typeof rpc.gObjectCheck === 'function') {
      try {
        // Codex PR8 round 6 P1: Syscoin Core's `gobject_check` takes
        // exactly ONE positional arg — `hex_data` — see
        // syscoin/src/rpc/governance.cpp::gobject_check. Core
        // derives parentHash/revision/nTime internally just to
        // construct the validator; they do NOT participate in the
        // submission hash, so they are irrelevant to preflight. An
        // earlier iteration forwarded the full 4-tuple to match
        // `gobject_submit`; Core rejects that with
        //   RPC_INVALID_PARAMS: too many positional arguments
        // which our "terminal" heuristic below then misclassifies
        // as 422 core_rejected on perfectly valid proposals. Pass
        // just the canonical dataHex.
        const resp = await rpc.gObjectCheck(canon.dataHex);
        // Accept either a raw result or a JSON-RPC envelope shape.
        const result =
          resp && typeof resp === 'object' && 'result' in resp
            ? resp.result
            : resp;
        // Codex PR8 round 6 P1: Syscoin Core's `gobject_check`
        // returns `{ "Object status": "OK" }` on accept (literal
        // key with a space; see governance.cpp line 111:
        //   objResult.pushKV("Object status", "OK");
        // ). It does NOT use `{ "Object": "success" }` — that was
        // our previous (wrong) assumption, inherited from legacy
        // Dash docs. Without this fix, every successful preflight
        // fell through to the "reject" branch and surfaced as
        // 422 core_rejected. Be lenient on the "OK" casing but
        // strict on the key.
        const statusStr =
          result && (result['Object status'] || result['object status']);
        if (statusStr && String(statusStr).toUpperCase() === 'OK') {
          // accepted
        } else {
          const msg =
            (result && (result.Error || result.error || result['Error Message'])) ||
            JSON.stringify(result);
          const issues = proposalValidate.parseCoreRejectMessage(msg);
          return res.status(422).json({ error: 'core_rejected', issues });
        }
      } catch (err) {
        // Codex PR8 round 11 P1: the previous heuristic included a
        // bare /invalid/ token, which matched transport / parser
        // errors JSON-RPC clients commonly wrap with the word
        // "invalid" ("Invalid URL", "invalid response from server",
        // "invalid JSON-RPC response", "invalid utf-8 sequence in
        // headers", etc.). Those are temporary outages, not Core
        // rejections — classifying them as 422 core_rejected
        // blocks legitimate /prepare calls until the node/network
        // recovers.
        //
        // Terminal = phrases Syscoin Core explicitly emits from
        // CGovernanceObject::IsValidLocally() and gobject_check's
        // reject branches. Everything else (including anything
        // containing a bare "invalid" / "error" / "failed") is
        // treated as transient and soft-allowed, because JSON-RPC
        // clients routinely wrap transport and parser failures
        // with those tokens ("Invalid URL", "invalid response
        // from server", "invalid JSON-RPC response"). Note: we
        // deliberately do NOT use `parseCoreRejectMessage` for
        // classification — its final arm raises a catch-all
        // `core_rejected` issue for ANY non-empty string, which
        // would false-positive every transport error as a Core
        // rejection.
        const msg = String((err && err.message) || err);
        const terminalCorePhrases = [
          // CGovernanceObject::IsValidLocally phrases (same set
          // `parseCoreRejectMessage` maps to structured codes):
          /name exceeds/i,
          /name\s+(?:is\s+)?empty/i,
          /name contains invalid/i,
          /start_epoch/i,
          /end_epoch/i,
          /payment_amount is negative/i,
          /payment_amount\b.*not found/i,
          /payment_address is invalid/i,
          /payment_address\b.*not found/i,
          /payment_address can't have whitespaces/i,
          /script addresses are not supported/i,
          /url.*whitespaces/i,
          /url too short/i,
          /url invalid/i,
          /url\b.*not found/i,
          /data exceeds/i,
          /type is not 1/i,
          /type field not found/i,
          /governance object (?:is )?expired/i,
          /proposal (?:is )?expired/i,
          // gobject_check wrapper rejects:
          /Governance object is not valid/i,
          /Object submission rejected/i,
          /Invalid parent hash/i,
          /Invalid (?:object )?signature/i,
          /Invalid object type/i,
          /Invalid proposal/i,
          /Invalid data hex/i,
          /hash mismatch/i,
          /collateral (?:missing|invalid|rejected)/i,
        ];
        const isTerminal = terminalCorePhrases.some((re) => re.test(msg));
        if (isTerminal) {
          const issues = proposalValidate.parseCoreRejectMessage(msg);
          return res.status(422).json({ error: 'core_rejected', issues });
        }
        // eslint-disable-next-line no-console
        console.warn(
          '[POST /gov/proposals/prepare] gObjectCheck soft-fail',
          msg
        );
        // fall through — transient; let the idempotent replay
        // branch or subsequent insert handle it.
      }
    }

    // Idempotent replay: Core just re-acked (or we soft-failed), so
    // the cached envelope is safe to return.
    let proposalHash;
    let opReturnHex;
    if (existingByPayload) {
      proposalHash = existingByPayload.proposalHash;
      // Rebuild opReturnHex from the frozen fields; the stored hash
      // is the big-endian display form, so we rehash rather than
      // byte-reverse to keep the derivation honest (and to catch
      // any drift between computeProposalHash and the row).
      try {
        opReturnHex = computeProposalHash({
          parentHash,
          revision,
          time: timeUnix,
          dataHex: existingByPayload.dataHex,
        }).opReturnBytes.toString('hex');
      } catch (err) {
        // eslint-disable-next-line no-console
        console.error(
          '[POST /gov/proposals/prepare] rehash error',
          err
        );
        return res.status(500).json({ error: 'internal' });
      }
      // 200 (not 201) + idempotent flag: nothing new was created.
      return res.status(200).json({
        submission: jsonSubmission(existingByPayload),
        opReturnHex,
        canonicalJson: canon.json,
        payloadBytes: canon.byteLength,
        collateralFeeSats: COLLATERAL_FEE_SATS.toString(),
        requiredConfirmations: REQUIRED_CONFIRMATIONS,
        idempotent: true,
      });
    }

    // Fresh path: compute the hash we will commit to via OP_RETURN.
    let hash;
    try {
      hash = computeProposalHash({
        parentHash,
        revision,
        time: timeUnix,
        dataHex: canon.dataHex,
      });
    } catch (err) {
      // Should be impossible post-validation; keep as 500 so we see it.
      // eslint-disable-next-line no-console
      console.error('[POST /gov/proposals/prepare] hash error', err);
      return res.status(500).json({ error: 'internal' });
    }
    proposalHash = hash.displayHex;
    opReturnHex = hash.opReturnBytes.toString('hex');

    // Draft consumption: default to "yes" if a draftId is supplied
    // and belongs to the user. The frontend explicitly opts out with
    // { consumeDraft: false } if it ever wants to publish from a
    // draft without deleting it.
    //
    // Codex PR8 round 9 P2: the ownership lookup + insert USED to
    // straddle the `runAtomic` boundary: we read the draft, then
    // inserted with that cached id. A concurrent delete/consume of
    // the same draft (e.g. another tab, or an earlier /prepare on
    // the same draft that won a race) could therefore invalidate
    // the FK between the read and the insert, and the insert would
    // throw a SQLITE_CONSTRAINT foreign-key error that bled through
    // as a generic 500. That's a normal race we should degrade
    // gracefully through, not a server fault. Parse the candidate
    // id here (still pure), but defer the actual ownership lookup
    // (and the corresponding removal) to *inside* the atomic block
    // below so both see the same point-in-time view of `drafts`.
    const consumeDraft =
      body.consumeDraft !== undefined ? Boolean(body.consumeDraft) : true;
    let candidateDraftId = null;
    if (body.draftId !== undefined && body.draftId !== null) {
      candidateDraftId = parseIntId(body.draftId) || null;
    }

    let createdRow;
    try {
      createdRow = runAtomic(() => {
        // Resolve the draft *inside* the transaction so the ownership
        // check and the insert (and the optional delete) see a
        // consistent snapshot. better-sqlite3's `db.transaction`
        // holds a write lock for the duration of this callback, so
        // no other writer can delete the draft out from under us
        // between the getByIdForUser and the submissions.create.
        // If a concurrent delete already happened *before* we
        // grabbed the lock, the draft is gone — degrade to
        // draftId:null (no FK violation) rather than 500ing, since
        // from the user's perspective the wizard form data is
        // still perfectly publishable; the draft row is just
        // bookkeeping.
        let resolvedDraftId = null;
        if (candidateDraftId) {
          const d = drafts.getByIdForUser(candidateDraftId, userId);
          if (d) resolvedDraftId = candidateDraftId;
        }
        const row = submissions.create({
          userId,
          draftId: resolvedDraftId,
          parentHash,
          revision,
          timeUnix,
          dataHex: canon.dataHex,
          proposalHash,
          title: f.title ? String(f.title) : canon.payload.name,
          name: canon.payload.name,
          url: canon.payload.url,
          paymentAddress: canon.payload.payment_address,
          paymentAmountSats: canon.payload.payment_amount_sats,
          paymentCount,
          startEpoch: canon.payload.start_epoch,
          endEpoch: canon.payload.end_epoch,
        });
        if (resolvedDraftId && consumeDraft) {
          drafts.remove(resolvedDraftId, userId);
        }
        return row;
      });
    } catch (err) {
      // Codex PR8 round 3 P2: two concurrent /prepare requests with
      // the same canonical payload would both miss the pre-read
      // above (`findPreparedByDataHexForUser`) and both attempt to
      // insert. The partial unique index
      //   `idx_proposal_submissions_user_payload_prepared`
      //   (user_id, data_hex) WHERE status='prepared' will reject the
      // second insert with SQLITE_CONSTRAINT_UNIQUE. Re-read the
      // row the winner created and return it as an idempotent 200,
      // so the loser sees the same canonical envelope as the winner.
      const msg = String((err && err.message) || err);
      const constraintHit =
        (err && (err.code === 'SQLITE_CONSTRAINT_UNIQUE' ||
          err.code === 'SQLITE_CONSTRAINT')) ||
        /UNIQUE constraint failed/i.test(msg);
      if (constraintHit) {
        // better-sqlite3 is synchronous and can throw (SQLITE_BUSY,
        // I/O, corrupt index, temp-write-failed) from this read.
        // Without a local try/catch the throw escapes into the
        // async Express 4 handler as an unhandled rejection —
        // Express 4 does not catch async handler errors, so in
        // prod that becomes a process-level UnhandledPromise-
        // Rejection warning and a client-visible hang/default
        // 500 HTML page instead of our structured JSON 500.
        // Swallow here and surface the same `internal` code the
        // rest of this handler uses for DB failures.
        let winner;
        try {
          winner = submissions.findPreparedByDataHexForUser(
            userId,
            canon.dataHex
          );
        } catch (lookupErr) {
          // eslint-disable-next-line no-console
          console.error(
            '[POST /gov/proposals/prepare] winner lookup after unique-race failed',
            lookupErr
          );
          return res.status(500).json({ error: 'internal' });
        }
        if (winner) {
          // Rebuild the OP_RETURN bytes from the winner's frozen
          // fields — same rehash-over-byte-reverse rationale as the
          // idempotent replay branch above.
          let winnerOpReturnHex;
          try {
            winnerOpReturnHex = computeProposalHash({
              parentHash,
              revision,
              time: winner.timeUnix,
              dataHex: winner.dataHex,
            }).opReturnBytes.toString('hex');
          } catch (rehashErr) {
            // eslint-disable-next-line no-console
            console.error(
              '[POST /gov/proposals/prepare] rehash after race error',
              rehashErr
            );
            return res.status(500).json({ error: 'internal' });
          }
          return res.status(200).json({
            submission: jsonSubmission(winner),
            opReturnHex: winnerOpReturnHex,
            canonicalJson: canon.json,
            payloadBytes: canon.byteLength,
            collateralFeeSats: COLLATERAL_FEE_SATS.toString(),
            requiredConfirmations: REQUIRED_CONFIRMATIONS,
            idempotent: true,
          });
        }
        // Constraint fired but no winner row found — extremely odd
        // (e.g. another index clashed). Fall through to a generic 500.
      }
      // eslint-disable-next-line no-console
      console.error('[POST /gov/proposals/prepare] persist error', err);
      return res.status(500).json({ error: 'internal' });
    }

    return res.status(201).json({
      submission: jsonSubmission(createdRow),
      opReturnHex,
      canonicalJson: canon.json,
      payloadBytes: canon.byteLength,
      collateralFeeSats: COLLATERAL_FEE_SATS.toString(),
      requiredConfirmations: REQUIRED_CONFIRMATIONS,
    });
  });

  // -----------------------------------------------------------------
  // Submissions
  // -----------------------------------------------------------------

  // GET /gov/proposals/submissions
  //
  // Output: { submissions: [...], total } — scoped to the caller.
  router.get('/submissions', (req, res) => {
    const userId = req.user.id;
    const list = submissions.listForUser(userId);
    return res.json({
      submissions: list.map(jsonSubmission),
      total: list.length,
    });
  });

  // GET /gov/proposals/submissions/:id
  //
  // 404 covers malformed ids, missing rows, and other users' rows.
  router.get('/submissions/:id', (req, res) => {
    const userId = req.user.id;
    const id = parseIntId(req.params.id);
    if (!id) return res.status(404).json({ error: 'not_found' });
    const row = submissions.getByIdForUser(id, userId);
    if (!row) return res.status(404).json({ error: 'not_found' });
    return res.json({ submission: jsonSubmission(row) });
  });

  // POST /gov/proposals/submissions/:id/attach-collateral
  //
  // Input: { collateralTxid: '<64-hex>' }
  // Output: 200 { submission }
  // Errors: 404 not_found (not owner / unknown id),
  //         409 status_not_prepared, 409 txid_already_used,
  //         400 validation_failed (bad txid).
  //
  // This is the "I paid the 150 SYS" handoff. From here the dispatcher
  // owns the row — the route doesn't need to know whether the user
  // paid via Pali or pasted from Syscoin-Qt.
  router.post('/submissions/:id/attach-collateral', (req, res) => {
    const userId = req.user.id;
    const id = parseIntId(req.params.id);
    if (!id) return res.status(404).json({ error: 'not_found' });
    const body = req.body || {};
    const txid =
      typeof body.collateralTxid === 'string'
        ? body.collateralTxid.trim()
        : '';
    // Shape-check only; the repo re-validates and enforces uniqueness.
    if (!HEX64.test(txid)) {
      return res.status(400).json({
        error: 'validation_failed',
        issues: [
          {
            field: 'collateralTxid',
            code: 'txid_invalid',
            message: 'Collateral txid must be 64 hex characters.',
          },
        ],
      });
    }
    try {
      const updated = submissions.attachCollateral(id, userId, txid);
      if (!updated) return res.status(404).json({ error: 'not_found' });
      return res.json({ submission: jsonSubmission(updated) });
    } catch (err) {
      // The repo signals business-rule failures via err.code; map each
      // to its HTTP status and let everything else fall to 500.
      if (err && err.code === 'status_not_prepared') {
        return res
          .status(409)
          .json({ error: 'conflict', reason: 'status_not_prepared' });
      }
      if (err && err.code === 'txid_already_used') {
        return res
          .status(409)
          .json({ error: 'conflict', reason: 'txid_already_used' });
      }
      if (err && err.code === 'txid_invalid') {
        return res.status(400).json({
          error: 'validation_failed',
          issues: [
            {
              field: 'collateralTxid',
              code: 'txid_invalid',
              message: err.message,
            },
          ],
        });
      }
      // eslint-disable-next-line no-console
      console.error(
        '[POST /gov/proposals/submissions/:id/attach-collateral]',
        err
      );
      return res.status(500).json({ error: 'internal' });
    }
  });

  // DELETE /gov/proposals/submissions/:id
  //
  // The repo enforces "only `prepared` and `failed` are deletable";
  // trying to delete anything else returns 0 changes and we 409. We
  // explicitly check the row state first so the error reason is
  // actionable ("status_not_deletable") instead of generic 404.
  router.delete('/submissions/:id', (req, res) => {
    const userId = req.user.id;
    const id = parseIntId(req.params.id);
    if (!id) return res.status(404).json({ error: 'not_found' });
    const row = submissions.getByIdForUser(id, userId);
    if (!row) return res.status(404).json({ error: 'not_found' });
    if (row.status !== 'prepared' && row.status !== 'failed') {
      return res
        .status(409)
        .json({ error: 'conflict', reason: 'status_not_deletable' });
    }
    // Codex PR8 round 7 P2: `submissions.remove` only deletes rows
    // still in `prepared` or `failed` (see the partial DELETE in
    // proposalSubmissions.js). A concurrent transition between our
    // pre-read above and this line — e.g. a sibling request flips
    // the row to `awaiting_collateral` via attach-collateral — is
    // perfectly possible in a multi-worker deployment, and leaves
    // `changes === 0`. Silently returning 204 in that case is a
    // false-success: the submission is still alive and may run to
    // completion on-chain even though the API told the client it
    // was deleted. Check the row count and translate a miss into a
    // 409 so the client can re-read the state and react.
    const removed = submissions.remove(id, userId);
    if (Number(removed) === 0) {
      // Re-read to produce the most actionable reason. If the row
      // is gone, another tab/device already deleted it — 404 is
      // correct. If it still exists, its status moved out of the
      // deletable set — 409 with `status_not_deletable` mirrors the
      // pre-read branch above.
      const again = submissions.getByIdForUser(id, userId);
      if (!again) return res.status(404).json({ error: 'not_found' });
      return res
        .status(409)
        .json({ error: 'conflict', reason: 'status_not_deletable' });
    }
    return res.status(204).end();
  });

  return router;
}
+
// Public surface: the router factory plus the tunable constants it
// defaults to, so consumers and tests reference one shared source of
// truth for the fee / confirmation / limit values.
module.exports = {
  createGovProposalsRouter,
  COLLATERAL_FEE_SATS,
  REQUIRED_CONFIRMATIONS,
  DEFAULT_MAX_DRAFTS_PER_USER,
  DEFAULT_MAX_PAYMENT_COUNT,
};
diff --git a/server.js b/server.js
index d50401f..1f46022 100644
--- a/server.js
+++ b/server.js
@@ -32,6 +32,8 @@ const { client, rpcServices } = require('./services/rpcClient');
const { createCurrentVotesCache } = require('./lib/voteReceipts');
const { createReminderLog } = require('./lib/reminderLog');
const { createReminderDispatcher } = require('./lib/reminderDispatcher');
+const { createProposalDispatcher } = require('./lib/proposalDispatcher');
+const { createProposalRpc } = require('./lib/proposalRpc');
// Per-process cache for `gobject_getcurrentvotes`. Concurrent callers
// hitting GET /gov/receipts for the same proposal share one RPC; a
@@ -141,6 +143,20 @@ const services = finalizeSessionMw(buildServices({ db }));
// would see a 401.
app.use(['/auth', '/vault', '/gov'], services.sessionMw.parse);
+// Proposal RPC adapter.
+//
+// The governance-proposals code (dispatcher + prepare pre-flight)
+// speaks a camelCase surface on purpose — see
+// lib/proposalDispatcher.js for the full rationale.
+// @syscoin/syscoin-js exposes differently-cased methods (`gObject_submit`,
+// `gObject_check`, `getRawTransaction`) that return a "stub" you
+// `.call()` to actually fire. The wrapping lives in
+// `lib/proposalRpc.js` so it can be unit-tested directly; without
+// that extraction a regression in the argument shape sent to
+// syscoin-js / syscoind (e.g. stringified revision/time) would only
+// surface in integration.
+const proposalRpc = createProposalRpc(() => rpcServices(client.callRpc));
+
mountAuthAndVault(app, {
services,
mailer,
@@ -167,6 +183,7 @@ mountAuthAndVault(app, {
getCurrentVotes: (proposalHash) => currentVotesCache.get(proposalHash),
invalidateCurrentVotes: (proposalHash) =>
currentVotesCache.invalidate(proposalHash),
+ proposalRpc,
});
// -----------------------------------------------------------------------------
@@ -271,6 +288,84 @@ setTimeout(() => {
}, 60 * 60 * 1000).unref();
}, 5 * 60 * 1000).unref();
+// -----------------------------------------------------------------------------
+// Proposal dispatcher (PR 8).
+//
+// Walks `awaiting_collateral` submissions: bumps confirmation counts
+// from getRawTransaction, fires gObject_submit once >= 6 confs, and
+// transitions rows to `submitted` or `failed`. The mailer hooks
+// resolve the submission's user and send the corresponding template.
+//
+// Same cadence philosophy as the reminder dispatcher: first tick a
+// few minutes after boot (lets the RPC warm up) and then once a
+// minute — fast enough that the 6-conf threshold is observed within
+// about a block of real confirmation, slow enough to be polite to
+// the RPC node (N rows → N getRawTransaction calls per tick). The
+// timer is .unref()'d so it never keeps the process alive on its own.
+// -----------------------------------------------------------------------------
+const proposalDispatcher = createProposalDispatcher({
+ submissions: services.proposalSubmissions,
+ rpc: proposalRpc,
+ onSubmitted: async ({ submission }) => {
+ const user = services.users.findById(submission.userId);
+ if (!user || !user.email) return;
+ await mailer.sendProposalSubmitted({
+ to: user.email,
+ proposalName: submission.name,
+ governanceHash: submission.governanceHash,
+ collateralTxid: submission.collateralTxid,
+ submissionId: submission.id,
+ });
+ },
+ onFailed: async ({ submission }) => {
+ const user = services.users.findById(submission.userId);
+ if (!user || !user.email) return;
+ await mailer.sendProposalFailed({
+ to: user.email,
+ proposalName: submission.name,
+ failReason: submission.failReason,
+ failDetail: submission.failDetail,
+ submissionId: submission.id,
+ });
+ },
+ log: (level, event, meta) => {
+ // eslint-disable-next-line no-console
+ console.log(`[proposal] ${level} ${event}`, meta || '');
+ },
+});
+
+// Codex PR8 round 9 P2: self-scheduling dispatcher loop.
+//
+// The previous implementation used `setInterval(..., 60s)` which
+// fires on a fixed cadence regardless of how long the last tick is
+// still running. Under slow RPC or a large `awaiting_collateral`
+// backlog, a single tick can easily exceed the interval — two
+// overlapping ticks then process the same rows concurrently, which
+// at best doubles the `getRawTransaction` / `gObjectSubmit` load on
+// the RPC node (and any shared rate limiter) and at worst races on
+// state transitions that the CAS guards in proposalSubmissions.js
+// would otherwise collapse cleanly. Serialize with a
+// self-scheduling `setTimeout` that re-arms only AFTER the previous
+// `tick()` resolves (matching the appFactory.js pattern).
+const PROPOSAL_DISPATCHER_INTERVAL_MS = 60 * 1000;
+const PROPOSAL_DISPATCHER_KICKOFF_MS = 5 * 60 * 1000;
+
+async function proposalDispatcherLoop() {
+ try {
+ await proposalDispatcher.tick();
+ } catch (err) {
+ // Dispatcher swallows per-row errors internally; any throw out
+ // here is an invariant violation worth logging but not fatal.
+ // eslint-disable-next-line no-console
+ console.error('[proposal] tick failed', err && err.message);
+ }
+ setTimeout(proposalDispatcherLoop, PROPOSAL_DISPATCHER_INTERVAL_MS).unref();
+}
+
+setTimeout(() => {
+ proposalDispatcherLoop();
+}, PROPOSAL_DISPATCHER_KICKOFF_MS).unref();
+
const PORT = process.env.PORT || 8080;
app.listen(PORT, () => {
// eslint-disable-next-line no-console
diff --git a/tests/appFactory.dispatcher.test.js b/tests/appFactory.dispatcher.test.js
new file mode 100644
index 0000000..c8067fc
--- /dev/null
+++ b/tests/appFactory.dispatcher.test.js
@@ -0,0 +1,112 @@
+'use strict';
+
+// Codex PR8 round 8 P2 regression.
+//
+// `stopProposalDispatcher()` used to only call `clearTimeout()` on the
+// pending timer handle. If a tick was already *in flight* — i.e. we
+// were past `await dispatcher.tick()` at the moment stop was called —
+// the callback would continue running and call setTimeout(...) again,
+// re-arming the polling loop after it was supposed to be stopped.
+// That leaked the dispatcher into test teardown and shutdown paths.
+//
+// This test forces that exact race: we stub the dispatcher so every
+// tick awaits a deferred promise we control. While a tick is parked in
+// its `await`, we call stopProposalDispatcher(); then we release the
+// deferred. If the fix is in place, no new timer should arm and no
+// further ticks should run.
+
+jest.useRealTimers();
+
+const express = require('express');
+
+// We need to replace createProposalDispatcher with a stub we can
+// orchestrate. Unlike jest.mock, jest.doMock is not hoisted, so it
+// only works because it runs before appFactory is require()d below.
+const tickGate = {
+ pending: [],
+ tickCount: 0,
+ nextDeferred() {
+ let resolve;
+ const promise = new Promise((r) => {
+ resolve = r;
+ });
+ const d = { promise, resolve };
+ this.pending.push(d);
+ return d;
+ },
+};
+
+jest.doMock('../lib/proposalDispatcher', () => ({
+ createProposalDispatcher: () => ({
+ async tick() {
+ tickGate.tickCount += 1;
+ const d = tickGate.nextDeferred();
+ await d.promise;
+ },
+ }),
+}));
+
+const { openDatabase } = require('../lib/db');
+const { createMailer } = require('../lib/mailer');
+const { createApp } = require('../lib/appFactory');
+
+function wait(ms) {
+ return new Promise((r) => setTimeout(r, ms));
+}
+
+describe('appFactory: stopProposalDispatcher (Codex round 8 P2)', () => {
+ test('stop during an in-flight tick does NOT rearm the loop', async () => {
+ const db = openDatabase(':memory:');
+ const mailer = createMailer({ transport: 'memory', from: 't@x.com' });
+ // Minimal RPC adapter is enough — dispatcher itself is stubbed,
+ // so it never actually calls into this.
+ const proposalRpc = {
+ getRawTransaction: async () => ({ confirmations: 0 }),
+ gObjectSubmit: async () => 'hash',
+ gObjectCheck: async () => ({ 'Object status': 'OK' }),
+ };
+
+ // Use a stripped-down createApp invocation. We bypass mailer-URL
+ // requirements by providing a dummy mailer and no /gov masternode
+ // deps, which createApp treats as optional. The dispatcher timing
+ // is what we care about — fire the kickoff almost immediately so
+ // we don't spin Jest for 5s.
+ const { stopProposalDispatcher } = createApp({
+ db,
+ mailer,
+ proposalRpc,
+ startProposalDispatcher: true,
+ // Make the *next-interval* setTimeout short so we'd notice a
+ // rearm quickly. The *kickoff* timer is capped by
+ // `Math.min(5000, proposalDispatcherIntervalMs)` in appFactory
+ // so setting this to 50 also makes the kickoff fire in ~50ms.
+ proposalDispatcherIntervalMs: 50,
+ });
+
+ try {
+ // Wait for the first tick to enter its `await`.
+ for (let i = 0; i < 200; i++) {
+ if (tickGate.tickCount >= 1 && tickGate.pending.length >= 1) break;
+ await wait(10);
+ }
+ expect(tickGate.tickCount).toBe(1);
+ expect(tickGate.pending).toHaveLength(1);
+
+ // Stop while the tick is still parked on its deferred.
+ stopProposalDispatcher();
+
+ // Release the in-flight tick. The fix must prevent the callback
+ // from re-arming after this resolves.
+ tickGate.pending[0].resolve();
+
+ // Give the event loop plenty of time for a rogue rearm to fire.
+ // proposalDispatcherIntervalMs is 50ms; we wait ~10x to be sure.
+ await wait(500);
+
+ expect(tickGate.tickCount).toBe(1);
+ } finally {
+ stopProposalDispatcher();
+ db.close();
+ }
+ });
+});
diff --git a/tests/govProposals.routes.test.js b/tests/govProposals.routes.test.js
new file mode 100644
index 0000000..8770072
--- /dev/null
+++ b/tests/govProposals.routes.test.js
@@ -0,0 +1,1677 @@
+'use strict';
+
+// Route tests for /gov/proposals.
+//
+// We build a self-contained Express app here rather than extending the
+// central buildTestApp helper: the router isn't mounted by appFactory
+// yet (that wiring lives in the be-server-wiring task), so plumbing
+// the deps through appFactory here would be premature. A minimal
+// inline harness also keeps the test scope tight — the things that
+// can break in these tests are the route handlers and the wiring
+// between repos, nothing else.
+
+const express = require('express');
+const cookieParser = require('cookie-parser');
+const request = require('supertest');
+
+const { openDatabase } = require('../lib/db');
+const { createMailer } = require('../lib/mailer');
+const { createUsersRepo } = require('../lib/users');
+const { createSessionStore } = require('../lib/sessions');
+const { createPendingRegistrationsRepo } = require('../lib/pendingRegistrations');
+const { createVaultsRepo } = require('../lib/vaults');
+const { createProposalDraftsRepo } = require('../lib/proposalDrafts');
+const {
+ createProposalSubmissionsRepo,
+} = require('../lib/proposalSubmissions');
+const { createSessionMiddleware } = require('../middleware/session');
+const { createCsrfMiddleware } = require('../middleware/csrf');
+const rateLimiters = require('../middleware/rateLimit');
+const { createAuthRouter } = require('../routes/auth');
+const {
+ createGovProposalsRouter,
+ COLLATERAL_FEE_SATS,
+ REQUIRED_CONFIRMATIONS,
+} = require('../routes/govProposals');
+const { _resetPepperForTests } = require('../lib/kdf');
+
+const SAMPLE_AUTH =
+ 'a4f8b3c1d9e7f2a5b1c6d8e4f7a9b2c5d1e8f4a7b3c9d5e1f6a2b8c4d7e3f5a9';
+
+function buildApp({ gObjectCheck = null, nowRef = null } = {}) {
+ _resetPepperForTests();
+ process.env.SYSNODE_AUTH_PEPPER = 'd'.repeat(64);
+ process.env.NODE_ENV = 'test';
+
+ const db = openDatabase(':memory:');
+ const mailer = createMailer({ transport: 'memory', from: 't@example.com' });
+
+ const users = createUsersRepo(db);
+ const sessions = createSessionStore(db);
+ const pendingRegistrations = createPendingRegistrationsRepo(db);
+ const vaults = createVaultsRepo(db);
+ const drafts = createProposalDraftsRepo(db);
+ const submissions = createProposalSubmissionsRepo(db);
+
+ const sessionMw = createSessionMiddleware({
+ sessions,
+ users,
+ secureCookies: false,
+ });
+ const csrfMw = createCsrfMiddleware({ secureCookies: false });
+ const runAtomic = (fn) => db.transaction(fn)();
+
+ const app = express();
+ app.use(express.json({ limit: '256kb' }));
+ app.use(cookieParser());
+ app.use(sessionMw.parse);
+
+ const syncScheduler = (fn) => {
+ const p = fn();
+ if (p && typeof p.then === 'function') {
+ p.catch(() => {});
+ }
+ };
+
+ app.use(
+ '/auth',
+ createAuthRouter({
+ users,
+ sessions,
+ pendingRegistrations,
+ vaults,
+ mailer,
+ sessionMw,
+ csrfMw,
+ limiters: {
+ login: rateLimiters.disabled(),
+ register: rateLimiters.disabled(),
+ vote: rateLimiters.disabled(),
+ },
+ baseUrl: 'http://api.test.local',
+ frontendUrl: 'http://app.test.local',
+ scheduler: syncScheduler,
+ runAtomic,
+ })
+ );
+
+ app.use(
+ '/gov/proposals',
+ createGovProposalsRouter({
+ drafts,
+ submissions,
+ sessionMw,
+ csrfMw,
+ rpc: gObjectCheck ? { gObjectCheck } : {},
+ runAtomic,
+ ...(nowRef ? { now: () => nowRef.value } : {}),
+ })
+ );
+
+ return { app, db, mailer, users, drafts, submissions };
+}
+
+function extractCookies(res) {
+ const raw = res.headers['set-cookie'] || [];
+ const map = {};
+ for (const c of raw) {
+ const [pair] = c.split(';');
+ const [k, v] = pair.split('=');
+ map[k] = v;
+ }
+ return map;
+}
+
+// Wait for a test-mailer outbox entry matching a predicate. The register
+// handler schedules mailer.sendVerification via an async fn; depending on
+// task ordering the email may not yet be in the outbox when the HTTP
+// response returns. Polling with setImmediate lets the event loop turn
+// (flushing pending tasks) without real sleeps that would slow the suite.
+async function waitForOutbox(mailer, predicate, { tries = 20 } = {}) {
+ for (let i = 0; i < tries; i++) {
+ const hit = mailer.outbox.find(predicate);
+ if (hit) return hit;
+ await new Promise((r) => setImmediate(r));
+ }
+ throw new Error('waitForOutbox: no matching message within retry budget');
+}
+
+async function loggedInAgent(ctx, email = 'user@example.com') {
+ const agent = request.agent(ctx.app);
+ await agent.post('/auth/register').send({ email, authHash: SAMPLE_AUTH });
+ const msg = await waitForOutbox(ctx.mailer, (m) => m.to === email);
+ const token = msg.html.match(/token=([0-9a-f]{64})/)[1];
+ await agent.post('/auth/verify-email').send({ token });
+ const loginRes = await agent
+ .post('/auth/login')
+ .send({ email, authHash: SAMPLE_AUTH });
+ const csrf = extractCookies(loginRes).csrf;
+ return { agent, csrf };
+}
+
+// Representative "happy path" proposal body (epochs derive from the
+// current clock, so it is not byte-stable across calls). The frontend
+function validProposalBody(overrides = {}) {
+ const nowSec = Math.floor(Date.now() / 1000);
+ return {
+ title: 'Fund the docs team',
+ description: 'Anything the hash does NOT commit to.',
+ name: 'fund-docs',
+ url: 'https://forum.syscoin.org/t/fund-docs',
+ paymentAddress: 'sys1qw508d6qejxtdg4y5r3zarvary0c5xw7kygmkq9',
+ paymentAmount: '5000',
+ paymentCount: 3,
+ startEpoch: nowSec + 3600,
+ endEpoch: nowSec + 3600 * 24 * 90,
+ ...overrides,
+ };
+}
+
+// -----------------------------------------------------------------------
+// Drafts
+// -----------------------------------------------------------------------
+
+describe('drafts CRUD', () => {
+ let ctx;
+
+ beforeEach(() => {
+ ctx = buildApp();
+ });
+
+ afterEach(() => {
+ ctx.db.close();
+ });
+
+ test('401 without session', async () => {
+ const res = await request(ctx.app).post('/gov/proposals/drafts').send({});
+ expect(res.status).toBe(401);
+ });
+
+ test('403 csrf_missing when authenticated without token', async () => {
+ const { agent } = await loggedInAgent(ctx);
+ const res = await agent.post('/gov/proposals/drafts').send({});
+ expect(res.status).toBe(403);
+ expect(res.body.error).toBe('csrf_missing');
+ });
+
+ test('create draft persists fields and returns 201', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({
+ title: 'My draft',
+ name: 'My Draft-Name',
+ paymentAmount: '12.5',
+ paymentCount: 2,
+ startEpoch: 2000000000,
+ });
+ expect(res.status).toBe(201);
+ expect(res.body.draft).toMatchObject({
+ title: 'My draft',
+ name: 'My Draft-Name',
+ paymentCount: 2,
+ startEpoch: 2000000000,
+ endEpoch: null,
+ });
+ // 12.5 SYS = 1_250_000_000 sats
+ expect(res.body.draft.paymentAmountSats).toBe('1250000000');
+ expect(typeof res.body.draft.id).toBe('number');
+ });
+
+ test('create rejects payment_count out of range', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentCount: 9999 });
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues[0].code).toBe('payment_count_range');
+ });
+
+ test('create rejects invalid payment_amount', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentAmount: 'not a number' });
+ expect(res.status).toBe(400);
+ expect(res.body.issues[0].field).toBe('payment_amount');
+ });
+
+ // Codex PR8 round 5 P2: malformed payment_amount_sats used to
+ // surface as a generic 500 because the route forwarded the raw
+ // string down to proposalDrafts.create(), which throws out of the
+ // handler. Route-layer validation now rejects it with the same
+ // 400 shape as other validation failures.
+ test('create rejects malformed payment_amount_sats as 400', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const cases = [
+ 'abc', // non-digit
+ '12.5', // decimal (sats are integer)
+ '-1', // negative
+ '007', // leading zeros
+ '', // empty string
+ '1e3', // scientific notation
+ ];
+ for (const sats of cases) {
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentAmountSats: sats });
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues[0].field).toBe('payment_amount_sats');
+ expect(res.body.issues[0].code).toBe('amount_sats_invalid');
+ }
+ });
+
+ test('create accepts well-formed payment_amount_sats', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentAmountSats: '15000000000' });
+ expect(res.status).toBe(201);
+ expect(res.body.draft.paymentAmountSats).toBe('15000000000');
+ });
+
+ test('patch rejects malformed payment_amount_sats as 400', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const created = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: 'draft' });
+ const id = created.body.draft.id;
+ const res = await agent
+ .patch(`/gov/proposals/drafts/${id}`)
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentAmountSats: '12.5' });
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues[0].field).toBe('payment_amount_sats');
+ });
+
+ // Codex PR8 round 16 P2: shape-valid but over-int64 payment_amount_sats
+ // used to pass draft validation (the regex only checks digit shape)
+ // and then overflowed the SQLite INTEGER column at insert/update,
+ // surfacing as a generic 500. With the MAX_PAYMENT_AMOUNT_SATS gate
+ // the route now returns a deterministic 400 with `amount_too_large`
+ // so the client can correct the payload.
+ test('create rejects payment_amount_sats above int64_max as 400 (Codex round 16 P2)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ // 2^63 exactly — one past the largest signed 64-bit integer,
+ // the canonical overflow case.
+ const over = '9223372036854775808';
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentAmountSats: over });
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues[0].field).toBe('payment_amount_sats');
+ expect(res.body.issues[0].code).toBe('amount_too_large');
+ });
+
+ test('create accepts exactly int64_max payment_amount_sats (Codex round 16 P2)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ // 2^63 - 1 — the largest value the SQLite INTEGER column can
+ // hold. Business-nonsensical for SYS but must pass draft
+ // validation so we do not reject a representable payload.
+ const max = '9223372036854775807';
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentAmountSats: max });
+ expect(res.status).toBe(201);
+ expect(res.body.draft.paymentAmountSats).toBe(max);
+ });
+
+ test('patch rejects payment_amount_sats above int64_max as 400 (Codex round 16 P2)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const created = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: 'draft' });
+ const id = created.body.draft.id;
+ const res = await agent
+ .patch(`/gov/proposals/drafts/${id}`)
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentAmountSats: '9999999999999999999' }); // 10^19, > 2^63
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues[0].field).toBe('payment_amount_sats');
+ expect(res.body.issues[0].code).toBe('amount_too_large');
+ });
+
+ test('create with over-range decimal paymentAmount also rejected as amount_too_large', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ // 10^13 SYS = 10^21 sats — parses fine to BigInt, but the
+ // int64 gate below must still fire so the error code is
+ // deterministic instead of a 500 at the repo layer.
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentAmount: '10000000000000' });
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues[0].field).toBe('payment_amount_sats');
+ expect(res.body.issues[0].code).toBe('amount_too_large');
+ });
+
+ // Codex PR8 round 17 P2: raw JSON numbers above
+ // `Number.MAX_SAFE_INTEGER` are rounded by JSON.parse BEFORE the
+ // route handler sees them. Earlier code then `BigInt(n)`-d the
+ // already-rounded double, silently persisting a different
+ // `payment_amount_sats` than the caller sent. Reject non-safe
+ // integers with a dedicated code so clients know to switch to a
+ // digit string for large values.
+ test('create rejects paymentAmountSats as unsafe JSON number (Codex round 17 P2)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ // Use the JSON text explicitly so the number is parsed in
+ // transit — supertest's .send(object) would let JS stringify
+ // the literal, and we want the over-safe-integer path.
+ const body = `{"paymentAmountSats": 9007199254740993}`;
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .set('Content-Type', 'application/json')
+ .send(body);
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues[0].field).toBe('payment_amount_sats');
+ expect(res.body.issues[0].code).toBe('amount_sats_unsafe_number');
+ });
+
+ test('create accepts paymentAmountSats at Number.MAX_SAFE_INTEGER (Codex round 17 P2)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ // 2^53 - 1 is losslessly representable both as a JS number and
+ // as a BigInt, so the route MUST accept it without forcing the
+ // caller to switch to a string.
+ const body = `{"paymentAmountSats": 9007199254740991}`;
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .set('Content-Type', 'application/json')
+ .send(body);
+ expect(res.status).toBe(201);
+ expect(res.body.draft.paymentAmountSats).toBe('9007199254740991');
+ });
+
+ test('create accepts paymentAmountSats as a large digit string (Codex round 17 P2)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ // Well past safe-integer but still inside int64 — the correct
+ // way for a client to submit large amounts.
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ paymentAmountSats: '100000000000000000' }); // 10^17, < 2^63
+ expect(res.status).toBe(201);
+ expect(res.body.draft.paymentAmountSats).toBe('100000000000000000');
+ });
+
+ test('list returns only caller drafts, ordered most-recently-updated first', async () => {
+ const a = await loggedInAgent(ctx, 'a@example.com');
+ const b = await loggedInAgent(ctx, 'b@example.com');
+
+ await a.agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', a.csrf)
+ .send({ title: 'A1' });
+ const r2 = await a.agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', a.csrf)
+ .send({ title: 'A2' });
+ await b.agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', b.csrf)
+ .send({ title: 'B1' });
+
+ const listA = await a.agent.get('/gov/proposals/drafts');
+ expect(listA.status).toBe(200);
+ expect(listA.body.total).toBe(2);
+ // updated_at DESC then id DESC — most-recent insert first.
+ expect(listA.body.drafts[0].id).toBe(r2.body.draft.id);
+ expect(listA.body.drafts.map((d) => d.title)).toEqual(['A2', 'A1']);
+
+ const listB = await b.agent.get('/gov/proposals/drafts');
+ expect(listB.body.total).toBe(1);
+ expect(listB.body.drafts[0].title).toBe('B1');
+ });
+
+ test('get/patch/delete enforce ownership (404 for others)', async () => {
+ const a = await loggedInAgent(ctx, 'a@example.com');
+ const b = await loggedInAgent(ctx, 'b@example.com');
+ const created = await a.agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', a.csrf)
+ .send({ title: 'secret' });
+ const id = created.body.draft.id;
+
+ // Owner can read
+ const own = await a.agent.get(`/gov/proposals/drafts/${id}`);
+ expect(own.status).toBe(200);
+
+ // Stranger cannot
+ const notme = await b.agent.get(`/gov/proposals/drafts/${id}`);
+ expect(notme.status).toBe(404);
+ const patchRes = await b.agent
+ .patch(`/gov/proposals/drafts/${id}`)
+ .set('X-CSRF-Token', b.csrf)
+ .send({ title: 'hijack' });
+ expect(patchRes.status).toBe(404);
+ const delRes = await b.agent
+ .delete(`/gov/proposals/drafts/${id}`)
+ .set('X-CSRF-Token', b.csrf);
+ expect(delRes.status).toBe(404);
+
+ // Unchanged for the owner
+ const still = await a.agent.get(`/gov/proposals/drafts/${id}`);
+ expect(still.body.draft.title).toBe('secret');
+ });
+
+ test('patch only updates provided fields', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const created = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: 'orig', name: 'orig-name' });
+ const id = created.body.draft.id;
+ const patched = await agent
+ .patch(`/gov/proposals/drafts/${id}`)
+ .set('X-CSRF-Token', csrf)
+ .send({ title: 'new title' });
+ expect(patched.status).toBe(200);
+ expect(patched.body.draft.title).toBe('new title');
+ // unchanged
+ expect(patched.body.draft.name).toBe('orig-name');
+ });
+
+ test('delete removes the row (204)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const created = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: 'bye' });
+ const id = created.body.draft.id;
+ const del = await agent
+ .delete(`/gov/proposals/drafts/${id}`)
+ .set('X-CSRF-Token', csrf);
+ expect(del.status).toBe(204);
+ const get = await agent.get(`/gov/proposals/drafts/${id}`);
+ expect(get.status).toBe(404);
+ });
+
+ test('invalid id params return 404 (never 400)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const r1 = await agent.get('/gov/proposals/drafts/abc');
+ expect(r1.status).toBe(404);
+ const r2 = await agent
+ .patch('/gov/proposals/drafts/0')
+ .set('X-CSRF-Token', csrf)
+ .send({});
+ expect(r2.status).toBe(404);
+ const r3 = await agent
+ .delete('/gov/proposals/drafts/-1')
+ .set('X-CSRF-Token', csrf);
+ expect(r3.status).toBe(404);
+ });
+
+ test('draft_limit is enforced (409)', async () => {
+ // Build with a smaller cap so we don't actually insert 50 rows.
+ _resetPepperForTests();
+ process.env.SYSNODE_AUTH_PEPPER = 'd'.repeat(64);
+ process.env.NODE_ENV = 'test';
+ const db = openDatabase(':memory:');
+ const mailer = createMailer({ transport: 'memory', from: 't@example.com' });
+ const users = createUsersRepo(db);
+ const sessions = createSessionStore(db);
+ const pendingRegistrations = createPendingRegistrationsRepo(db);
+ const vaults = createVaultsRepo(db);
+ const drafts = createProposalDraftsRepo(db);
+ const submissions = createProposalSubmissionsRepo(db);
+ const sessionMw = createSessionMiddleware({
+ sessions,
+ users,
+ secureCookies: false,
+ });
+ const csrfMw = createCsrfMiddleware({ secureCookies: false });
+ const runAtomic = (fn) => db.transaction(fn)();
+
+ const app = express();
+ app.use(express.json());
+ app.use(cookieParser());
+ app.use(sessionMw.parse);
+ const syncScheduler = (fn) => {
+ const p = fn();
+ if (p && p.catch) p.catch(() => {});
+ };
+ app.use(
+ '/auth',
+ createAuthRouter({
+ users,
+ sessions,
+ pendingRegistrations,
+ vaults,
+ mailer,
+ sessionMw,
+ csrfMw,
+ limiters: {
+ login: rateLimiters.disabled(),
+ register: rateLimiters.disabled(),
+ vote: rateLimiters.disabled(),
+ },
+ baseUrl: 'http://api.test.local',
+ frontendUrl: 'http://app.test.local',
+ scheduler: syncScheduler,
+ runAtomic,
+ })
+ );
+ app.use(
+ '/gov/proposals',
+ createGovProposalsRouter({
+ drafts,
+ submissions,
+ sessionMw,
+ csrfMw,
+ runAtomic,
+ maxDraftsPerUser: 2,
+ })
+ );
+ const smallCtx = { app, mailer };
+ const { agent, csrf } = await loggedInAgent(smallCtx, 'cap@example.com');
+ await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: '1' });
+ await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: '2' });
+ const res = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: '3' });
+ expect(res.status).toBe(409);
+ expect(res.body).toMatchObject({
+ error: 'conflict',
+ reason: 'draft_limit',
+ });
+ db.close();
+ });
+});
+
+// -----------------------------------------------------------------------
+// Prepare
+// -----------------------------------------------------------------------
+
+describe('POST /gov/proposals/prepare', () => {
+ let ctx;
+
+ afterEach(() => {
+ if (ctx) ctx.db.close();
+ });
+
+ test('happy path: creates submission, returns hash/canonical/fee', async () => {
+ const calls = [];
+ ctx = buildApp({
+ // Codex PR8 round 6 P1: Core's gobject_check takes ONE
+ // positional arg (hex_data) and returns { "Object status": "OK" }
+ // on accept. Mock the contract the real adapter exposes today.
+ gObjectCheck: async (dataHex) => {
+ calls.push({ dataHex });
+ return { result: { 'Object status': 'OK' } };
+ },
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody());
+ expect(res.status).toBe(201);
+ expect(res.body.submission).toMatchObject({
+ status: 'prepared',
+ parentHash: '0',
+ revision: 1,
+ collateralConfs: 0,
+ collateralTxid: null,
+ governanceHash: null,
+ });
+ // proposal_hash is 64 lowercase hex chars
+ expect(res.body.submission.proposalHash).toMatch(/^[0-9a-f]{64}$/);
+ // opReturn hex is 64 hex chars (32 bytes) and equals reversed proposalHash
+ expect(res.body.opReturnHex).toMatch(/^[0-9a-f]{64}$/);
+ const revDisplay = Buffer.from(res.body.opReturnHex, 'hex')
+ .reverse()
+ .toString('hex');
+ expect(revDisplay).toBe(res.body.submission.proposalHash);
+ // Canonical JSON is exactly the proposalValidate form: type first,
+ // flat object, no description/title/paymentCount fields.
+ expect(res.body.canonicalJson).toMatch(/^\{"type":1,"name":/);
+ expect(res.body.canonicalJson).not.toMatch(/description/);
+ expect(res.body.canonicalJson).not.toMatch(/payment_count/);
+ expect(res.body.payloadBytes).toBe(
+ Buffer.byteLength(res.body.canonicalJson, 'utf8')
+ );
+ expect(res.body.collateralFeeSats).toBe(
+ COLLATERAL_FEE_SATS.toString()
+ );
+ expect(res.body.requiredConfirmations).toBe(REQUIRED_CONFIRMATIONS);
+ expect(calls).toHaveLength(1);
+ // Codex PR8 round 6 P1: preflight must call Core's gobject_check
+ // with its single positional arg — `hex_data`. Earlier we passed
+ // the 4-tuple that gobject_submit uses, which Core rejects with
+ // RPC_INVALID_PARAMS and our "terminal" classifier misread as
+ // 422 core_rejected. Assert the wire contract directly.
+ expect(calls[0]).toEqual({
+ dataHex: res.body.submission.dataHex,
+ });
+ });
+
+ test('idempotency is keyed on dataHex, not proposalHash — retries across a second boundary still collapse (Codex round 2 P1)', async () => {
+ // proposalHash bakes in `time`, so two retries of the same
+ // logical /prepare that happen to straddle a one-second
+ // boundary hash differently. A hash-only idempotency check
+ // would create duplicate prepared rows for what is
+ // semantically the same submission. Payload-keyed idempotency
+ // (lookup by user_id + data_hex on `prepared` rows) must
+ // collapse them to one row with stable hash/time.
+ const nowRef = { value: 1_800_000_000_000 }; // ms
+ ctx = buildApp({ nowRef });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const nowSec = Math.floor(nowRef.value / 1000);
+ const body = validProposalBody({
+ startEpoch: nowSec + 3600,
+ endEpoch: nowSec + 3600 * 24 * 30,
+ });
+
+ const r1 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+ expect(r1.status).toBe(201);
+
+ // Advance wall clock by >1s so a second /prepare computes a
+ // fresh `time` and therefore a *different* proposalHash if
+ // idempotency is hash-keyed.
+ nowRef.value += 2500;
+
+ const r2 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+ expect(r2.status).toBe(200);
+ expect(r2.body.idempotent).toBe(true);
+ expect(r2.body.submission.id).toBe(r1.body.submission.id);
+ expect(r2.body.submission.proposalHash).toBe(r1.body.submission.proposalHash);
+ expect(r2.body.submission.timeUnix).toBe(r1.body.submission.timeUnix);
+ // And — crucially — there's exactly one row in the DB.
+ const rows = ctx.submissions.listForUser(
+ ctx.users.findByEmail('user@example.com').id
+ );
+ expect(rows).toHaveLength(1);
+ });
+
+ test(
+ 'concurrent prepare race: DB unique index + constraint fallback collapses to a single row (Codex round 3 P2)',
+ async () => {
+ // Simulate a true interleave: the pre-read in /prepare misses
+ // (the competing /prepare hasn't been committed yet from the
+ // caller's perspective), so the route proceeds to INSERT. The
+ // partial unique index `idx_proposal_submissions_user_payload_prepared`
+ // rejects the second INSERT with SQLITE_CONSTRAINT_UNIQUE;
+ // the route's catch block re-reads via findPreparedByDataHexForUser
+ // and responds 200 idempotent with the winner's submission.
+ //
+ // We mimic the interleave by stubbing
+ // `submissions.findPreparedByDataHexForUser` to return null on
+ // the *pre-read* only for the second request, while the DB
+ // state contains the first prepared row.
+ ctx = buildApp();
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const body = validProposalBody();
+
+ const r1 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+ expect(r1.status).toBe(201);
+
+ // Monkey-patch the submissions object shared with the router:
+ // force the pre-read miss once, then restore.
+ const realFind = ctx.submissions.findPreparedByDataHexForUser;
+ let miss = true;
+ ctx.submissions.findPreparedByDataHexForUser = (...args) => {
+ if (miss) {
+ miss = false;
+ return null;
+ }
+ return realFind.apply(ctx.submissions, args);
+ };
+
+ const r2 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+
+ ctx.submissions.findPreparedByDataHexForUser = realFind;
+
+ expect(r2.status).toBe(200);
+ expect(r2.body.idempotent).toBe(true);
+ expect(r2.body.submission.id).toBe(r1.body.submission.id);
+ const rows = ctx.submissions.listForUser(
+ ctx.users.findByEmail('user@example.com').id
+ );
+ expect(rows).toHaveLength(1);
+ }
+ );
+
+ test(
+ 'prepare race fallback: DB throw on winner re-read returns JSON 500, not unhandled rejection (Codex round 12 P2)',
+ async () => {
+ // Regression: the unique-constraint recovery path re-reads
+ // the winning row via findPreparedByDataHexForUser. That call
+ // is synchronous better-sqlite3 and can throw (SQLITE_BUSY,
+ // I/O, corrupt index) — without a local try/catch the throw
+ // escaped as an unhandled rejection in the async Express 4
+ // handler, reintroducing exactly the async-error gap the
+ // surrounding code was designed to avoid. Fix: wrap the
+ // re-read in try/catch and return the structured
+ // `{ error: 'internal' }` JSON 500 the rest of the handler
+ // already uses for DB failures.
+ ctx = buildApp();
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const body = validProposalBody();
+
+ // Prime a prepared row so the UNIQUE index will reject the
+ // second insert and push us into the recovery branch.
+ const r1 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+ expect(r1.status).toBe(201);
+
+ // Make the pre-read miss (drives the second /prepare into
+ // the INSERT → UNIQUE-race branch) AND make the winner
+ // re-read throw a synthetic SQLITE_BUSY.
+ const realFind = ctx.submissions.findPreparedByDataHexForUser;
+ let calls = 0;
+ ctx.submissions.findPreparedByDataHexForUser = () => {
+ calls += 1;
+ if (calls === 1) return null; // pre-read miss
+ // The recovery-path re-read — this is the call round-12
+ // P2 protects. Throw synchronously as better-sqlite3
+ // would under SQLITE_BUSY.
+ const e = new Error('database is locked');
+ e.code = 'SQLITE_BUSY';
+ throw e;
+ };
+
+ const r2 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+
+ ctx.submissions.findPreparedByDataHexForUser = realFind;
+
+ expect(r2.status).toBe(500);
+ expect(r2.body).toEqual({ error: 'internal' });
+ }
+ );
+
+ test(
+ 'idempotent replay re-runs gObjectCheck preflight; Core-reject after a soft-failed first attempt returns 422 (Codex round 5 P1)',
+ async () => {
+ // Scenario: the *first* /prepare call lands during a transient
+ // Core RPC outage (node unreachable). The route soft-allows
+ // network errors, so the prepared row is created and the
+ // client gets a 201 envelope. Later, the client retries the
+ // exact same canonical body, but now Core is reachable and
+ // deterministically rejects the payload (e.g. checksum-invalid
+ // payment address that fullValidate didn't catch because
+ // address parsing is network-param-specific to Core).
+ //
+ // Before the round-5 fix, the idempotent branch returned the
+ // cached envelope without re-preflighting, so the user would
+ // proceed to burn 150 SYS on a proposal Core will reject at
+ // dispatcher-time. With the fix, gObjectCheck runs on EVERY
+ // /prepare, including the idempotent replay, so Core-reject
+ // surfaces as 422 before any collateral is spent.
+ let phase = 'network-down';
+ ctx = buildApp({
+ gObjectCheck: async () => {
+ if (phase === 'network-down') {
+ // Match the soft-fail heuristic: the route only treats
+ // context-qualified Core validation phrases (not bare "invalid") as
+ // terminal. A pure connection-refused is soft.
+ const e = new Error('ECONNREFUSED: Core unreachable');
+ throw e;
+ }
+ if (phase === 'reject') {
+ return {
+ result: {
+ Error: 'checksum invalid for payment_address',
+ },
+ };
+ }
+ return { result: { 'Object status': 'OK' } };
+ },
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const body = validProposalBody();
+
+ const r1 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+ expect(r1.status).toBe(201);
+
+ // Flip to deterministic-reject and retry the SAME canonical
+ // body. The idempotency pre-read will find the prepared row.
+ phase = 'reject';
+
+ const r2 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+
+ // Critical: retry must NOT return cached 200-idempotent even
+ // though a prepared row exists for this payload; preflight
+ // has to execute and translate Core's reject into 422.
+ expect(r2.status).toBe(422);
+ expect(r2.body.error).toBe('core_rejected');
+
+ // And the prepared row is still in DB (the user can either
+ // DELETE it or edit the proposal to produce a fresh payload).
+ const rows = ctx.submissions.listForUser(
+ ctx.users.findByEmail('user@example.com').id
+ );
+ expect(rows).toHaveLength(1);
+ expect(rows[0].status).toBe('prepared');
+ expect(rows[0].id).toBe(r1.body.submission.id);
+ }
+ );
+
+ test('hash is deterministic: same inputs → same proposalHash', async () => {
+ ctx = buildApp();
+ // Two different users preparing the same proposal text — because
+ // our `time` field derives from the server clock, we can't
+ // actually compare hashes across two separate /prepare calls.
+ // Instead we assert the *shape* is deterministic: call the hash
+ // function directly with fixed inputs and verify the route's
+ // output, given a frozen time, matches.
+ const frozen = 1_700_000_000_000; // ms
+ const app = ctx.app;
+ // Patch Date.now used by the router? The route captures `now` at
+ // factory time; we don't have access here. Instead check the
+ // looser property: proposalHash changes when ANY canonical field
+ // changes, but two simultaneous back-to-back calls with identical
+ // canonical content return identical (idempotent) submissions.
+ void frozen;
+ void app;
+
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const body = validProposalBody();
+ const r1 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+ const r2 = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+ expect(r1.body.submission.proposalHash).toMatch(/^[0-9a-f]{64}$/);
+ // Idempotent: second call returns the existing row, NOT a new one.
+ expect(r2.status).toBe(200);
+ expect(r2.body.idempotent).toBe(true);
+ expect(r2.body.submission.id).toBe(r1.body.submission.id);
+ });
+
+ test('validation_failed for empty name', async () => {
+ ctx = buildApp();
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody({ name: '' }));
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues.find((i) => i.field === 'name')).toBeTruthy();
+ });
+
+ test('validation_failed for bad URL scheme', async () => {
+ ctx = buildApp();
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody({ url: 'javascript:alert(1)' }));
+ expect(res.status).toBe(400);
+ expect(
+ res.body.issues.find((i) => i.code === 'url_scheme')
+ ).toBeTruthy();
+ });
+
+ test('validation_failed for past end_epoch', async () => {
+ ctx = buildApp();
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const past = Math.floor(Date.now() / 1000) - 86400;
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(
+ validProposalBody({
+ startEpoch: past - 3600,
+ endEpoch: past,
+ })
+ );
+ expect(res.status).toBe(400);
+ expect(res.body.issues.some((i) => i.code === 'epoch_past')).toBe(true);
+ });
+
+ test('validation_failed for payment_count out of range (60 max default)', async () => {
+ ctx = buildApp();
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody({ paymentCount: 1000 }));
+ expect(res.status).toBe(400);
+ expect(
+ res.body.issues.find((i) => i.code === 'payment_count_range')
+ ).toBeTruthy();
+ });
+
+ test('core_rejected when gObjectCheck returns non-success', async () => {
+ ctx = buildApp({
+ // Codex PR8 round 6 P1: a non-"Object status: OK" response
+ // (including a plain `Error` message) is a rejection. Parse
+ // the message for codes and surface 422.
+ gObjectCheck: async () => ({
+ result: { Error: 'name exceeds 40 characters' },
+ }),
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody());
+ expect(res.status).toBe(422);
+ expect(res.body.error).toBe('core_rejected');
+ expect(
+ res.body.issues.find((i) => i.code === 'name_too_long')
+ ).toBeTruthy();
+ });
+
+ test('core_rejected classifies thrown validation-ish errors', async () => {
+ ctx = buildApp({
+ gObjectCheck: async () => {
+ throw new Error('proposal data exceeds 512 bytes');
+ },
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody());
+ expect(res.status).toBe(422);
+ expect(
+ res.body.issues.find((i) => i.code === 'payload_too_large')
+ ).toBeTruthy();
+ });
+
+ test(
+ 'accepts Core\'s canonical success shape { "Object status": "OK" } (Codex round 6 P1)',
+ async () => {
+ // Regression: previously we checked `result.Object === 'success'`,
+ // which is NOT the response Core produces. Core returns
+ // { "Object status": "OK" }
+ // (see syscoin/src/rpc/governance.cpp line 111:
+ // objResult.pushKV("Object status", "OK");
+ // ). With the old check every valid preflight fell through the
+ // reject branch and /prepare surfaced as 422 core_rejected. We
+ // accept mixed casing on the OK string for forward-compat but
+ // require the exact key.
+ ctx = buildApp({
+ gObjectCheck: async () => ({ result: { 'Object status': 'OK' } }),
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const r = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody());
+ expect(r.status).toBe(201);
+ expect(r.body.submission.status).toBe('prepared');
+ }
+ );
+
+ test(
+ 'gObjectCheck is called with hex_data only (Codex round 6 P1)',
+ async () => {
+ // Regression: a previous iteration of the adapter mirrored the
+ // 4-tuple `gobject_submit` signature and sent
+ // (parent_hash, revision, time, data_hex) to `gobject_check`.
+ // Core only takes `hex_data`, so it rejected with
+ // RPC_INVALID_PARAMS (too many positional arguments) and the
+ // route's terminal-error classifier then misread that as
+ // 422 core_rejected on valid proposals. Assert the adapter
+ // boundary sees exactly one argument.
+ const argCalls = [];
+ ctx = buildApp({
+ gObjectCheck: async (...args) => {
+ argCalls.push(args);
+ return { result: { 'Object status': 'OK' } };
+ },
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody());
+ expect(argCalls).toHaveLength(1);
+ expect(argCalls[0]).toHaveLength(1);
+ expect(typeof argCalls[0][0]).toBe('string');
+ expect(/^[0-9a-f]+$/.test(argCalls[0][0])).toBe(true);
+ }
+ );
+
+ test('transient RPC failure is soft-allowed (still 201)', async () => {
+ ctx = buildApp({
+ gObjectCheck: async () => {
+ // Simulate a transport-level error — nothing in the message
+ // matches our "terminal" regex.
+ const e = new Error('ECONNREFUSED 127.0.0.1:8370');
+ throw e;
+ },
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody());
+ expect(res.status).toBe(201);
+ expect(res.body.submission.status).toBe('prepared');
+ });
+
+ // Codex PR8 round 11 P1: narrow gObjectCheck terminal-error
+ // matcher. The previous heuristic included a bare /invalid/
+ // regex, which matched JSON-RPC transport/parser errors that
+ // routinely contain the word "invalid" — for example the
+ // messages below from fetch/jsonrpc client layers. Those are
+ // transient outages, NOT Core validation rejects, and must
+ // soft-allow through to a 201 prepare (let the idempotent
+ // replay or a subsequent retry handle it) instead of being
+ // misreported as a permanent 422 core_rejected.
+ test.each([
+ ['Invalid URL'],
+ ['invalid response from server'],
+ ['invalid JSON-RPC response: expected object'],
+ ['invalid utf-8 sequence in headers'],
+ ['request failed: invalid status line'],
+ ])(
+ 'transport error containing "invalid" is soft-allowed not terminal (%s) (Codex round 11 P1)',
+ async (transportMsg) => {
+ ctx = buildApp({
+ gObjectCheck: async () => {
+ throw new Error(transportMsg);
+ },
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody());
+ expect(res.status).toBe(201);
+ expect(res.body.submission.status).toBe('prepared');
+ }
+ );
+
+ // Complementary regression: the matcher still has to fire for
+ // actual Core governance-validation phrases. If it does not,
+ // bad payloads silently get filed as `prepared` and the user
+ // only learns the object is garbage once the dispatcher
+ // eventually submits and Core rejects it — a much worse UX
+ // because collateral may already be burned by then.
+ test.each([
+ ['name exceeds 40 characters', 'name_too_long'],
+ ['proposal data exceeds 512 bytes', 'payload_too_large'],
+ ['payment_address is invalid', 'address_invalid'],
+ ['Invalid data hex', null],
+ ['Governance object is not valid - start_epoch', 'epoch_order'],
+ ['Object submission rejected: hash mismatch', null],
+ ])(
+ 'genuine Core reject phrase "%s" still returns 422 (Codex round 11 P1)',
+ async (coreMsg, expectedIssueCode) => {
+ ctx = buildApp({
+ gObjectCheck: async () => {
+ throw new Error(coreMsg);
+ },
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody());
+ expect(res.status).toBe(422);
+ expect(res.body.error).toBe('core_rejected');
+ if (expectedIssueCode) {
+ expect(
+ res.body.issues.some((i) => i.code === expectedIssueCode)
+ ).toBe(true);
+ }
+ }
+ );
+
+ test(
+ 'returns 500 JSON (not an unhandled rejection) when the DB throws during prepare lookup (Codex round 8 P1)',
+ async () => {
+ // Regression: `findPreparedByDataHexForUser` and
+ // `drafts.getByIdForUser` used to run outside any try/catch in
+ // this async handler. A synchronous better-sqlite3 throw
+ // (SQLITE_BUSY / I/O / corrupt-index / temp-write-failed) then
+ // became an unhandled promise rejection — Express 4 does not
+ // catch async handler throws, so in prod it surfaces as a
+ // hung request + a process-level warning rather than a clean
+ // JSON 500 the client can retry.
+ ctx = buildApp();
+ const origFind = ctx.submissions.findPreparedByDataHexForUser;
+ ctx.submissions.findPreparedByDataHexForUser = () => {
+ const err = new Error('SQLITE_BUSY: database is locked');
+ err.code = 'SQLITE_BUSY';
+ throw err;
+ };
+ const unhandled = [];
+ const handler = (reason) => unhandled.push(reason);
+ process.on('unhandledRejection', handler);
+ try {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody());
+ expect(res.status).toBe(500);
+ expect(res.body).toEqual({ error: 'internal' });
+ } finally {
+ ctx.submissions.findPreparedByDataHexForUser = origFind;
+ process.removeListener('unhandledRejection', handler);
+ }
+ // Let any microtasks settle before we assert — an unhandled
+ // rejection would land on the next tick.
+ await new Promise((r) => setImmediate(r));
+ expect(unhandled).toEqual([]);
+ }
+ );
+
+ test('draftId is consumed (deleted) by default on success', async () => {
+ ctx = buildApp();
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const draft = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: 'wip' });
+ const draftId = draft.body.draft.id;
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send({ ...validProposalBody(), draftId });
+ expect(res.status).toBe(201);
+ expect(res.body.submission.draftId).toBe(draftId);
+ const gone = await agent.get(`/gov/proposals/drafts/${draftId}`);
+ expect(gone.status).toBe(404);
+ });
+
+ test('consumeDraft=false keeps the draft after prepare', async () => {
+ ctx = buildApp();
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const draft = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: 'keep me' });
+ const draftId = draft.body.draft.id;
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send({
+ ...validProposalBody(),
+ draftId,
+ consumeDraft: false,
+ });
+ expect(res.status).toBe(201);
+ const stillThere = await agent.get(`/gov/proposals/drafts/${draftId}`);
+ expect(stillThere.status).toBe(200);
+ });
+
+ test(
+ 'concurrent draft delete between pre-read and insert degrades to draftId:null (Codex round 9 P2)',
+ async () => {
+ // Regression: draft ownership USED to be resolved OUTSIDE the
+ // `runAtomic` transaction. A concurrent delete of that draft
+ // (e.g. another tab or an earlier /prepare that raced us) could
+ // therefore invalidate the FK between the cached id and the
+ // actual row, and the subsequent submissions.create would
+ // throw SQLITE_CONSTRAINT (foreign key) — bubbling out as a
+ // generic 500 even though the user's action is a perfectly
+ // normal race we should degrade through.
+ //
+ // Fix: resolve draft ownership inside the same atomic block
+ // that creates the submission. If the draft is gone by the
+ // time we enter the transaction, fall back to draftId:null
+ // instead of 500ing.
+ //
+ // We simulate the race by stubbing `drafts.getByIdForUser` to
+ // return null (as if the row was deleted between the client
+ // sending the request and the transaction starting). With the
+ // fix in place, prepare succeeds with 201 and draftId:null.
+ ctx = buildApp();
+ const { agent, csrf } = await loggedInAgent(ctx);
+ // Create a real draft so the request body's draftId survives
+ // input validation (parseIntId + > 0).
+ const draft = await agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', csrf)
+ .send({ title: 'will-race' });
+ const draftId = draft.body.draft.id;
+
+ // Simulate "concurrent delete landed before /prepare took the
+ // write lock": force the inside-txn lookup to return null.
+ const origGet = ctx.drafts.getByIdForUser;
+ ctx.drafts.getByIdForUser = () => null;
+ try {
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send({ ...validProposalBody(), draftId });
+ expect(res.status).toBe(201);
+ expect(res.body.submission.draftId).toBeNull();
+ } finally {
+ ctx.drafts.getByIdForUser = origGet;
+ }
+ }
+ );
+
+ test('unknown / other-user draftId is ignored (not an error)', async () => {
+ ctx = buildApp();
+ const a = await loggedInAgent(ctx, 'a@example.com');
+ const b = await loggedInAgent(ctx, 'b@example.com');
+ const draft = await b.agent
+ .post('/gov/proposals/drafts')
+ .set('X-CSRF-Token', b.csrf)
+ .send({ title: 'bs' });
+ const draftId = draft.body.draft.id;
+ const res = await a.agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', a.csrf)
+ .send({ ...validProposalBody(), draftId });
+ expect(res.status).toBe(201);
+ expect(res.body.submission.draftId).toBeNull();
+ // B's draft is untouched.
+ const stillThere = await b.agent.get(`/gov/proposals/drafts/${draftId}`);
+ expect(stillThere.status).toBe(200);
+ });
+
+ // Codex PR8 round 17 P2: /prepare used to `BigInt(f.paymentAmountSats)`
+ // directly, which silently rounded raw JSON numbers above
+ // `Number.MAX_SAFE_INTEGER` at parse time. The submission row's
+ // canonical JSON + proposal_hash would then encode a different
+ // payment amount than the caller typed. Reject unsafe numeric
+ // input up-front so the caller can re-submit as a digit string.
+ test('prepare rejects paymentAmountSats as unsafe JSON number (Codex round 17 P2)', async () => {
+ ctx = buildApp({
+ gObjectCheck: async () => ({ result: { 'Object status': 'OK' } }),
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const body = validProposalBody();
+ // Strip paymentAmount so the sats path is taken.
+ delete body.paymentAmount;
+ // Send the JSON with a raw unsafe-integer literal in transit.
+ const raw =
+ JSON.stringify(body).replace(/}$/, '') +
+ `, "paymentAmountSats": 9007199254740993}`;
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .set('Content-Type', 'application/json')
+ .send(raw);
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues[0].field).toBe('payment_amount_sats');
+ expect(res.body.issues[0].code).toBe('amount_sats_unsafe_number');
+ });
+
+ test('prepare accepts paymentAmountSats as a large digit string (Codex round 17 P2)', async () => {
+ // Digit strings parse losslessly through BigInt — this is the
+ // recommended wire form for large amounts.
+ ctx = buildApp({
+ gObjectCheck: async () => ({ result: { 'Object status': 'OK' } }),
+ });
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const body = {
+ ...validProposalBody(),
+ paymentAmount: undefined,
+ paymentAmountSats: '100000000000000000', // 10^17 sats, < 2^63
+ };
+ delete body.paymentAmount;
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(body);
+ expect(res.status).toBe(201);
+ });
+});
+
+// -----------------------------------------------------------------------
+// attach-collateral + submissions list/get/delete
+// -----------------------------------------------------------------------
+
+describe('submissions lifecycle', () => {
+ let ctx;
+
+ beforeEach(() => {
+ ctx = buildApp();
+ });
+
+ afterEach(() => {
+ ctx.db.close();
+ });
+
+ async function prepareOne(agent, csrf, overrides = {}) {
+ const res = await agent
+ .post('/gov/proposals/prepare')
+ .set('X-CSRF-Token', csrf)
+ .send(validProposalBody(overrides));
+ if (res.status !== 201) {
+ throw new Error(
+ `prepare failed: ${res.status} ${JSON.stringify(res.body)}`
+ );
+ }
+ return res.body;
+ }
+
+ const FAKE_TXID =
+ '9'.repeat(8) + 'a'.repeat(8) + 'b'.repeat(8) + 'c'.repeat(8) + 'd'.repeat(32);
+
+ test('attach-collateral flips prepared → awaiting_collateral', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const prep = await prepareOne(agent, csrf);
+ const res = await agent
+ .post(
+ `/gov/proposals/submissions/${prep.submission.id}/attach-collateral`
+ )
+ .set('X-CSRF-Token', csrf)
+ .send({ collateralTxid: FAKE_TXID });
+ expect(res.status).toBe(200);
+ expect(res.body.submission.status).toBe('awaiting_collateral');
+ expect(res.body.submission.collateralTxid).toBe(FAKE_TXID.toLowerCase());
+ });
+
+ test('attach-collateral rejects malformed txid (400)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const prep = await prepareOne(agent, csrf);
+ const res = await agent
+ .post(
+ `/gov/proposals/submissions/${prep.submission.id}/attach-collateral`
+ )
+ .set('X-CSRF-Token', csrf)
+ .send({ collateralTxid: 'deadbeef' });
+ expect(res.status).toBe(400);
+ expect(res.body.error).toBe('validation_failed');
+ expect(res.body.issues[0].field).toBe('collateralTxid');
+ });
+
+ test('attach-collateral rejects re-attach (409 status_not_prepared)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const prep = await prepareOne(agent, csrf);
+ await agent
+ .post(
+ `/gov/proposals/submissions/${prep.submission.id}/attach-collateral`
+ )
+ .set('X-CSRF-Token', csrf)
+ .send({ collateralTxid: FAKE_TXID });
+ const res = await agent
+ .post(
+ `/gov/proposals/submissions/${prep.submission.id}/attach-collateral`
+ )
+ .set('X-CSRF-Token', csrf)
+ .send({ collateralTxid: 'e'.repeat(64) });
+ expect(res.status).toBe(409);
+ expect(res.body.reason).toBe('status_not_prepared');
+ });
+
+ test('attach-collateral rejects duplicate txid across submissions (409)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const p1 = await prepareOne(agent, csrf);
+ // Slightly different proposal so /prepare creates a NEW row.
+ const p2 = await prepareOne(agent, csrf, {
+ name: 'fund-docs-2',
+ });
+ await agent
+ .post(`/gov/proposals/submissions/${p1.submission.id}/attach-collateral`)
+ .set('X-CSRF-Token', csrf)
+ .send({ collateralTxid: FAKE_TXID });
+ const res = await agent
+ .post(`/gov/proposals/submissions/${p2.submission.id}/attach-collateral`)
+ .set('X-CSRF-Token', csrf)
+ .send({ collateralTxid: FAKE_TXID });
+ expect(res.status).toBe(409);
+ expect(res.body.reason).toBe('txid_already_used');
+ });
+
+ test('attach-collateral 404 for non-owner', async () => {
+ const a = await loggedInAgent(ctx, 'a@example.com');
+ const b = await loggedInAgent(ctx, 'b@example.com');
+ const prep = await prepareOne(a.agent, a.csrf);
+ const res = await b.agent
+ .post(
+ `/gov/proposals/submissions/${prep.submission.id}/attach-collateral`
+ )
+ .set('X-CSRF-Token', b.csrf)
+ .send({ collateralTxid: FAKE_TXID });
+ expect(res.status).toBe(404);
+ });
+
+ test('list & get enforce ownership', async () => {
+ const a = await loggedInAgent(ctx, 'a@example.com');
+ const b = await loggedInAgent(ctx, 'b@example.com');
+ const prep = await prepareOne(a.agent, a.csrf);
+
+ const listB = await b.agent.get('/gov/proposals/submissions');
+ expect(listB.body.total).toBe(0);
+
+ const getB = await b.agent.get(
+ `/gov/proposals/submissions/${prep.submission.id}`
+ );
+ expect(getB.status).toBe(404);
+
+ const getA = await a.agent.get(
+ `/gov/proposals/submissions/${prep.submission.id}`
+ );
+ expect(getA.status).toBe(200);
+ expect(getA.body.submission.id).toBe(prep.submission.id);
+ });
+
+ test('delete allowed from prepared (204) and gone after', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const prep = await prepareOne(agent, csrf);
+ const del = await agent
+ .delete(`/gov/proposals/submissions/${prep.submission.id}`)
+ .set('X-CSRF-Token', csrf);
+ expect(del.status).toBe(204);
+ const after = await agent.get(
+ `/gov/proposals/submissions/${prep.submission.id}`
+ );
+ expect(after.status).toBe(404);
+ });
+
+ test('delete refused from awaiting_collateral (409)', async () => {
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const prep = await prepareOne(agent, csrf);
+ await agent
+ .post(
+ `/gov/proposals/submissions/${prep.submission.id}/attach-collateral`
+ )
+ .set('X-CSRF-Token', csrf)
+ .send({ collateralTxid: FAKE_TXID });
+ const del = await agent
+ .delete(`/gov/proposals/submissions/${prep.submission.id}`)
+ .set('X-CSRF-Token', csrf);
+ expect(del.status).toBe(409);
+ expect(del.body.reason).toBe('status_not_deletable');
+ });
+
+ test(
+ 'delete returns 409 when a concurrent transition racesthe row out of the deletable set (Codex round 7 P2)',
+ async () => {
+ // Scenario: the DELETE handler pre-reads the row, sees
+ // `prepared`, passes the status gate, then calls
+ // submissions.remove(). In a multi-worker deployment, a
+ // sibling request (attach-collateral from another tab, or a
+ // dispatcher pickup) can flip the status to
+ // `awaiting_collateral` between that pre-read and the DELETE
+ // statement. The repo's partial DELETE is guarded
+ // (`status IN ('prepared','failed')`) so it returns 0 changes
+ // — but the handler used to blindly return 204 anyway, which
+ // tells the client the submission is gone while it is in fact
+ // still alive and can run to completion on-chain.
+ //
+ // Fix (R7 P2): route checks `changes` and, when zero, re-reads
+ // to pick the right failure code — 409 status_not_deletable
+ // (raced to a non-deletable state) or 404 (raced to
+ // `deleted`, which today can only happen from another tab).
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const prep = await prepareOne(agent, csrf);
+ const id = prep.submission.id;
+
+ // Monkey-patch the submissions repo so `remove()` returns 0
+ // and the partial DELETE actually didn't fire (we simulate a
+ // concurrent transition to awaiting_collateral by flipping
+ // the row directly via attachCollateral just before remove).
+ const origRemove = ctx.submissions.remove;
+ ctx.submissions.remove = (rowId, userId) => {
+ // Simulate the concurrent transition — this is what
+ // another worker would have done between the pre-read and
+ // our DELETE.
+ ctx.submissions.attachCollateral(rowId, userId, 'a'.repeat(64));
+ return origRemove(rowId, userId);
+ };
+
+ try {
+ const del = await agent
+ .delete(`/gov/proposals/submissions/${id}`)
+ .set('X-CSRF-Token', csrf);
+ expect(del.status).toBe(409);
+ expect(del.body.reason).toBe('status_not_deletable');
+ } finally {
+ ctx.submissions.remove = origRemove;
+ }
+
+ // Row still exists, in its raced-to status.
+ const stillThere = await agent.get(
+ `/gov/proposals/submissions/${id}`
+ );
+ expect(stillThere.status).toBe(200);
+ expect(stillThere.body.submission.status).toBe('awaiting_collateral');
+ }
+ );
+
+ test(
+ 'delete returns 404 when the row was deleted concurrently (Codex round 7 P2)',
+ async () => {
+ // Same class of race as the previous test, but the concurrent
+ // worker deletes the row outright (another tab DELETE'd it).
+ // The repo's pre-read hit it, but by the time we call remove
+ // the row is gone — 0 changes and re-read returns null. The
+ // handler must surface that as 404 so the UI doesn't pretend
+ // it just deleted something it didn't.
+ const { agent, csrf } = await loggedInAgent(ctx);
+ const prep = await prepareOne(agent, csrf);
+ const id = prep.submission.id;
+
+ const origRemove = ctx.submissions.remove;
+ ctx.submissions.remove = (rowId, userId) => {
+ // Concurrent deletion by another tab
+ origRemove(rowId, userId);
+ // Report 0 changes for *our* call, as if a raced sibling
+ // already consumed the row.
+ return 0;
+ };
+
+ try {
+ const del = await agent
+ .delete(`/gov/proposals/submissions/${id}`)
+ .set('X-CSRF-Token', csrf);
+ expect(del.status).toBe(404);
+ expect(del.body.error).toBe('not_found');
+ } finally {
+ ctx.submissions.remove = origRemove;
+ }
+ }
+ );
+
+ test('delete 404 when row belongs to another user', async () => {
+ const a = await loggedInAgent(ctx, 'a@example.com');
+ const b = await loggedInAgent(ctx, 'b@example.com');
+ const prep = await prepareOne(a.agent, a.csrf);
+ const del = await b.agent
+ .delete(`/gov/proposals/submissions/${prep.submission.id}`)
+ .set('X-CSRF-Token', b.csrf);
+ expect(del.status).toBe(404);
+ });
+});
+
+// -----------------------------------------------------------------------
+// Factory validation — protects wiring regressions.
+// -----------------------------------------------------------------------
+
+describe('createGovProposalsRouter: factory argument validation', () => {
+ test('drafts repo required', () => {
+ expect(() =>
+ createGovProposalsRouter({
+ submissions: { create() {} },
+ sessionMw: { requireAuth: () => {}, parse: () => {} },
+ csrfMw: { require: () => {} },
+ runAtomic: () => {},
+ })
+ ).toThrow(/drafts/);
+ });
+ test('submissions repo required', () => {
+ expect(() =>
+ createGovProposalsRouter({
+ drafts: { create() {} },
+ sessionMw: { requireAuth: () => {}, parse: () => {} },
+ csrfMw: { require: () => {} },
+ runAtomic: () => {},
+ })
+ ).toThrow(/submissions/);
+ });
+ test('sessionMw required', () => {
+ expect(() =>
+ createGovProposalsRouter({
+ drafts: { create() {} },
+ submissions: { create() {} },
+ csrfMw: { require: () => {} },
+ runAtomic: () => {},
+ })
+ ).toThrow(/sessionMw/);
+ });
+ test('csrfMw required', () => {
+ expect(() =>
+ createGovProposalsRouter({
+ drafts: { create() {} },
+ submissions: { create() {} },
+ sessionMw: { requireAuth: () => {}, parse: () => {} },
+ runAtomic: () => {},
+ })
+ ).toThrow(/csrfMw/);
+ });
+ test('runAtomic required', () => {
+ expect(() =>
+ createGovProposalsRouter({
+ drafts: { create() {} },
+ submissions: { create() {} },
+ sessionMw: { requireAuth: () => {}, parse: () => {} },
+ csrfMw: { require: () => {} },
+ })
+ ).toThrow(/runAtomic/);
+ });
+});
|