Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .github/workflows/db-migration-backwards-compatibility.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,9 @@ jobs:
- name: Wait on Svix
run: pnpx wait-on tcp:localhost:8113

- name: Wait on ClickHouse
run: pnpx wait-on http://localhost:8136/ping

- name: Initialize database
run: pnpm run db:init

Expand Down
3 changes: 3 additions & 0 deletions .github/workflows/e2e-api-tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,9 @@ jobs:
- name: Wait on QStash
run: pnpx wait-on tcp:localhost:8125

- name: Wait on ClickHouse
run: pnpx wait-on http://localhost:8136/ping

- name: Initialize database
run: pnpm run db:init

Expand Down
3 changes: 3 additions & 0 deletions .github/workflows/e2e-custom-base-port-api-tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,9 @@ jobs:
- name: Wait on QStash
run: pnpx wait-on tcp:localhost:6725

- name: Wait on ClickHouse
run: pnpx wait-on http://localhost:6736/ping

- name: Initialize database
run: pnpm run db:init

Expand Down
26 changes: 13 additions & 13 deletions apps/backend/scripts/clickhouse-migrations.ts
Original file line number Diff line number Diff line change
Expand Up @@ -70,28 +70,28 @@ ALTER TABLE analytics_internal.events
UPDATE
data = CAST(concat(
'{',
'\"refresh_token_id\":', toJSONString(JSONExtractString(toJSONString(data), 'refreshTokenId')), ',',
'\"is_anonymous\":', toJSONString(JSONExtract(toJSONString(data), 'isAnonymous', 'Bool')), ',',
'\"ip_info\":', if(
JSONExtractString(toJSONString(data), 'ipInfo.ip') = '',
'"refresh_token_id":', toJSONString(data.refreshTokenId::String), ',',
'"is_anonymous":', if(ifNull(data.isAnonymous::Nullable(Bool), false), 'true', 'false'), ',',
'"ip_info":', if(
isNull(data.ipInfo.ip::Nullable(String)),
'null',
concat(
'{',
'\"ip\":', toJSONString(JSONExtractString(toJSONString(data), 'ipInfo.ip')), ',',
'\"is_trusted\":', toJSONString(JSONExtract(toJSONString(data), 'ipInfo.isTrusted', 'Bool')), ',',
'\"country_code\":', toJSONString(JSONExtract(toJSONString(data), 'ipInfo.countryCode', 'Nullable(String)')), ',',
'\"region_code\":', toJSONString(JSONExtract(toJSONString(data), 'ipInfo.regionCode', 'Nullable(String)')), ',',
'\"city_name\":', toJSONString(JSONExtract(toJSONString(data), 'ipInfo.cityName', 'Nullable(String)')), ',',
'\"latitude\":', toJSONString(JSONExtract(toJSONString(data), 'ipInfo.latitude', 'Nullable(Float64)')), ',',
'\"longitude\":', toJSONString(JSONExtract(toJSONString(data), 'ipInfo.longitude', 'Nullable(Float64)')), ',',
'\"tz_identifier\":', toJSONString(JSONExtract(toJSONString(data), 'ipInfo.tzIdentifier', 'Nullable(String)')),
'"ip":', toJSONString(data.ipInfo.ip::String), ',',
'"is_trusted":', if(ifNull(data.ipInfo.isTrusted::Nullable(Bool), false), 'true', 'false'), ',',
'"country_code":', if(isNull(data.ipInfo.countryCode::Nullable(String)), 'null', toJSONString(data.ipInfo.countryCode::String)), ',',
'"region_code":', if(isNull(data.ipInfo.regionCode::Nullable(String)), 'null', toJSONString(data.ipInfo.regionCode::String)), ',',
'"city_name":', if(isNull(data.ipInfo.cityName::Nullable(String)), 'null', toJSONString(data.ipInfo.cityName::String)), ',',
'"latitude":', if(isNull(data.ipInfo.latitude::Nullable(Float64)), 'null', toString(data.ipInfo.latitude::Float64)), ',',
'"longitude":', if(isNull(data.ipInfo.longitude::Nullable(Float64)), 'null', toString(data.ipInfo.longitude::Float64)), ',',
'"tz_identifier":', if(isNull(data.ipInfo.tzIdentifier::Nullable(String)), 'null', toJSONString(data.ipInfo.tzIdentifier::String)),
'}'
)
),
'}'
) AS JSON)
WHERE event_type = '$token-refresh'
AND JSONHas(toJSONString(data), 'refreshTokenId');
AND data.refreshTokenId::Nullable(String) IS NOT NULL;
`;

// Normalizes legacy $sign-up-rule-trigger rows (camelCase JSON) to the new format:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -172,13 +172,11 @@ export const GET = createSmartRouteHandler({
}

const flowControl = options.flowControl as UpstashRequest["flowControl"];
const deduplicationId = options.deduplicationId as UpstashRequest["deduplicationId"];

return {
url: fullUrl,
body: options.body,
...(flowControl ? { flowControl } : {}),
...(deduplicationId ? { deduplicationId } : {})
};
}

Expand Down
3 changes: 1 addition & 2 deletions apps/backend/src/lib/external-db-sync-queue.ts
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,7 @@ export async function enqueueExternalDbSyncBatch(tenancyIds: string[]): Promise<
json_build_object(
'url', '/api/latest/internal/external-db-sync/sync-engine',
'body', json_build_object('tenancyId', t.tenancy_id),
'flowControl', json_build_object('key', 'sentinel-sync-key', 'parallelism', 20),
'deduplicationId', t.tenancy_id
'flowControl', json_build_object('key', 'sentinel-sync-key', 'parallelism', 20)
),
NULL,
'sentinel-sync-key-' || t.tenancy_id
Expand Down
6 changes: 6 additions & 0 deletions apps/backend/src/lib/external-db-sync.ts
Original file line number Diff line number Diff line change
Expand Up @@ -566,6 +566,9 @@ async function syncPostgresMapping(
if (rows.length === 0) {
break;
}
if (rows.length > 1) {
console.log("db-sync-postgres: more than 1 row returned from source db fetch", { tenancyId, numRows: rows.length });
}
Comment on lines +569 to +571
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Consider using captureError instead of console.log

The rest of this file uses captureError for reporting unexpected conditions (lines 731, 735, 775, 779, 793). Using console.log here means this diagnostic signal won't appear in your error tracking/alerting system. If this is expected in normal operation (e.g., batch queries returning multiple rows), then a console.log is fine as a temporary debug aid. But if "more than 1 row" indicates an unexpected condition worth monitoring, captureError would be more consistent with the existing patterns in this file.

Note: If this suggestion doesn't match your team's coding style, reply to this and let me know. I'll remember it for next time!


await pushRowsToExternalDb(
externalClient,
Expand Down Expand Up @@ -644,6 +647,9 @@ async function syncClickhouseMapping(
if (rows.length === 0) {
break;
}
if (rows.length > 1) {
console.log("db-sync-clickhouse: more than 1 row returned from source db fetch", { tenancyId, numRows: rows.length });
}

await pushRowsToClickhouse(
client,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -266,8 +266,8 @@ describe("sign-up rules", () => {
"status": 403,
"body": {
"code": "SIGN_UP_REJECTED",
"details": { "message": "Your sign up was rejected. Please contact us for more information." },
"error": "Your sign up was rejected. Please contact us for more information.",
"details": { "message": "Your sign up was rejected by an administrator's sign-up rule." },
"error": "Your sign up was rejected by an administrator's sign-up rule.",
},
"headers": Headers {
"x-stack-known-error": "SIGN_UP_REJECTED",
Expand Down
3 changes: 2 additions & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@
"stop-deps": "POSTGRES_DELAY_MS=0 pnpm run deps-compose kill && POSTGRES_DELAY_MS=0 pnpm run deps-compose down -v",
"wait-until-postgres-is-ready:pg_isready": "until pg_isready -h localhost -p ${NEXT_PUBLIC_STACK_PORT_PREFIX:-81}28 && pg_isready -h localhost -p ${NEXT_PUBLIC_STACK_PORT_PREFIX:-81}34; do sleep 1; done",
"wait-until-postgres-is-ready": "command -v pg_isready >/dev/null 2>&1 && pnpm run wait-until-postgres-is-ready:pg_isready || sleep 10 # not everyone has pg_isready installed, so we fallback to sleeping",
"start-deps:no-delay": "pnpm pre && pnpm run deps-compose up --detach --build && pnpm run wait-until-postgres-is-ready && pnpm run db:init && echo \"\\nDependencies started in the background as Docker containers. 'pnpm run stop-deps' to stop them\"n",
"wait-until-clickhouse-is-ready": "pnpx wait-on http://localhost:${NEXT_PUBLIC_STACK_PORT_PREFIX:-81}36/ping",
"start-deps:no-delay": "pnpm pre && pnpm run deps-compose up --detach --build && pnpm run wait-until-postgres-is-ready && pnpm run wait-until-clickhouse-is-ready && pnpm run db:init && echo \"\\nDependencies started in the background as Docker containers. 'pnpm run stop-deps' to stop them\"",
"start-deps": "POSTGRES_DELAY_MS=${POSTGRES_DELAY_MS:-0} pnpm run start-deps:no-delay",
"restart-deps": "pnpm pre && pnpm run stop-deps && pnpm run start-deps",
"restart-deps:no-delay": "pnpm pre && pnpm run stop-deps && pnpm run start-deps:no-delay",
Expand Down
6 changes: 3 additions & 3 deletions packages/stack-shared/src/config/db-sync-mappings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,9 @@ export const DEFAULT_DB_SYNC_MAPPINGS = {
primary_email Nullable(String),
primary_email_verified UInt8,
signed_up_at DateTime64(3, 'UTC'),
client_metadata JSON,
client_read_only_metadata JSON,
server_metadata JSON,
client_metadata String,
client_read_only_metadata String,
server_metadata String,
is_anonymous UInt8,
restricted_by_admin UInt8,
restricted_by_admin_reason Nullable(String),
Expand Down
Loading