Skip to content

Commit 9cf7eed

Browse files
committed
refactor: use QuoteUtils for identifier quoting, remove backward-compat code
- Replace raw string interpolation (`"${schema}"."${table}"`) with `QuoteUtils.quoteQualifiedIdentifier()` from `@pgsql/quotes` in both presigned-url-plugin and bucket-provisioner-plugin
- Remove `LEGACY_STORAGE_MODULE_QUERY` constants from both plugins
- Remove `schemaSupportsMultiScope` module-level flags
- Remove all SAVEPOINT-based schema detection logic
- Simplify `resolveStorageModule()`, `getStorageModuleConfig()`, `getStorageModuleConfigForOwner()`, and `resolveStorageModuleByFileId()` to use direct queries without fallback
1 parent 3865697 commit 9cf7eed

5 files changed

Lines changed: 2610 additions & 7651 deletions

File tree

graphile/graphile-bucket-provisioner-plugin/package.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,8 @@
4141
},
4242
"dependencies": {
4343
"@constructive-io/bucket-provisioner": "workspace:^",
44-
"@pgpmjs/logger": "workspace:^"
44+
"@pgpmjs/logger": "workspace:^",
45+
"@pgsql/quotes": "^17.1.0"
4546
},
4647
"peerDependencies": {
4748
"grafast": "1.0.0",

graphile/graphile-bucket-provisioner-plugin/src/plugin.ts

Lines changed: 14 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ import { context as grafastContext, lambda, object } from 'grafast';
3535
import type { GraphileConfig } from 'graphile-config';
3636
import { extendSchema, gql } from 'graphile-utils';
3737
import { Logger } from '@pgpmjs/logger';
38+
import { QuoteUtils } from '@pgsql/quotes';
3839
import {
3940
BucketProvisioner,
4041
} from '@constructive-io/bucket-provisioner';
@@ -71,31 +72,8 @@ const APP_STORAGE_MODULE_QUERY = `
7172
LIMIT 1
7273
`;
7374

74-
/**
75-
* Legacy query for databases without the multi-scope schema.
76-
* Returns NULL for scope-related fields so the row shape matches StorageModuleRow.
77-
*/
78-
const LEGACY_STORAGE_MODULE_QUERY = `
79-
SELECT
80-
sm.id,
81-
NULL::int AS membership_type,
82-
NULL::uuid AS entity_table_id,
83-
bs.schema_name AS buckets_schema,
84-
bt.name AS buckets_table,
85-
sm.endpoint,
86-
sm.public_url_prefix,
87-
sm.provider,
88-
sm.allowed_origins
89-
FROM metaschema_modules_public.storage_module sm
90-
JOIN metaschema_public.table bt ON bt.id = sm.buckets_table_id
91-
JOIN metaschema_public.schema bs ON bs.id = bt.schema_id
92-
WHERE sm.database_id = $1
93-
LIMIT 1
94-
`;
95-
9675
/**
9776
* Resolve ALL storage modules for a database (for ownerId-based resolution).
98-
* Requires the multi-scope schema.
9977
*/
10078
const ALL_STORAGE_MODULES_QUERY = `
10179
SELECT
@@ -118,12 +96,6 @@ const ALL_STORAGE_MODULES_QUERY = `
11896
WHERE sm.database_id = $1
11997
`;
12098

121-
/**
122-
* Module-level flag to track whether the database schema supports multi-scope storage.
123-
* null = not yet detected, true = new schema (has membership_type), false = legacy schema.
124-
*/
125-
let schemaSupportsMultiScope: boolean | null = null;
126-
12799
interface StorageModuleRow {
128100
id: string;
129101
membership_type: number | null;
@@ -149,65 +121,20 @@ async function resolveStorageModule(
149121
ownerId?: string,
150122
): Promise<StorageModuleRow | null> {
151123
if (!ownerId) {
152-
// App-level resolution with backward compatibility
153-
if (schemaSupportsMultiScope === false) {
154-
const result = await pgClient.query(LEGACY_STORAGE_MODULE_QUERY, [databaseId]);
155-
return (result.rows[0] as StorageModuleRow) ?? null;
156-
}
157-
try {
158-
// Use SAVEPOINT so a failed probe doesn't abort the surrounding transaction
159-
await pgClient.query('SAVEPOINT storage_module_probe');
160-
const result = await pgClient.query(APP_STORAGE_MODULE_QUERY, [databaseId]);
161-
await pgClient.query('RELEASE SAVEPOINT storage_module_probe');
162-
schemaSupportsMultiScope = true;
163-
return (result.rows[0] as StorageModuleRow) ?? null;
164-
} catch (err: any) {
165-
if (err.code === '42703' || err.message?.includes('does not exist')) {
166-
log.debug('Multi-scope schema not detected, falling back to legacy query');
167-
schemaSupportsMultiScope = false;
168-
await pgClient.query('ROLLBACK TO SAVEPOINT storage_module_probe');
169-
await pgClient.query('RELEASE SAVEPOINT storage_module_probe');
170-
const result = await pgClient.query(LEGACY_STORAGE_MODULE_QUERY, [databaseId]);
171-
return (result.rows[0] as StorageModuleRow) ?? null;
172-
}
173-
try { await pgClient.query('ROLLBACK TO SAVEPOINT storage_module_probe'); } catch { /* ignore */ }
174-
try { await pgClient.query('RELEASE SAVEPOINT storage_module_probe'); } catch { /* ignore */ }
175-
throw err;
176-
}
177-
}
178-
179-
// Entity-scoped resolution requires the multi-scope schema
180-
if (schemaSupportsMultiScope === false) {
181-
log.debug('Legacy schema detected — entity-scoped storage not available');
182-
return null;
183-
}
184-
185-
// Load all modules and probe entity tables
186-
let modules: StorageModuleRow[];
187-
try {
188-
await pgClient.query('SAVEPOINT storage_module_probe');
189-
const result = await pgClient.query(ALL_STORAGE_MODULES_QUERY, [databaseId]);
190-
await pgClient.query('RELEASE SAVEPOINT storage_module_probe');
191-
schemaSupportsMultiScope = true;
192-
modules = result.rows as StorageModuleRow[];
193-
} catch (err: any) {
194-
if (err.code === '42703' || err.message?.includes('does not exist')) {
195-
log.debug('Multi-scope schema not detected during owner resolution');
196-
schemaSupportsMultiScope = false;
197-
await pgClient.query('ROLLBACK TO SAVEPOINT storage_module_probe');
198-
await pgClient.query('RELEASE SAVEPOINT storage_module_probe');
199-
return null;
200-
}
201-
try { await pgClient.query('ROLLBACK TO SAVEPOINT storage_module_probe'); } catch { /* ignore */ }
202-
try { await pgClient.query('RELEASE SAVEPOINT storage_module_probe'); } catch { /* ignore */ }
203-
throw err;
124+
// App-level resolution
125+
const result = await pgClient.query(APP_STORAGE_MODULE_QUERY, [databaseId]);
126+
return (result.rows[0] as StorageModuleRow) ?? null;
204127
}
205128

129+
// Entity-scoped: load all modules and probe entity tables
130+
const result = await pgClient.query(ALL_STORAGE_MODULES_QUERY, [databaseId]);
131+
const modules = result.rows as StorageModuleRow[];
206132
const entityModules = modules.filter((m) => m.entity_schema && m.entity_table);
207133

208134
for (const mod of entityModules) {
135+
const entityTable = QuoteUtils.quoteQualifiedIdentifier(mod.entity_schema!, mod.entity_table!);
209136
const probe = await pgClient.query(
210-
`SELECT 1 FROM "${mod.entity_schema}"."${mod.entity_table}" WHERE id = $1 LIMIT 1`,
137+
`SELECT 1 FROM ${entityTable} WHERE id = $1 LIMIT 1`,
211138
[ownerId],
212139
);
213140
if (probe.rows.length > 0) {
@@ -500,14 +427,15 @@ export function createBucketProvisionerPlugin(
500427

501428
// Look up the bucket row (RLS enforced via pgSettings)
502429
const hasOwner = ownerId && storageModule.membership_type !== null;
430+
const bucketsTable = QuoteUtils.quoteQualifiedIdentifier(storageModule.buckets_schema, storageModule.buckets_table);
503431
const bucketResult = await pgClient.query(
504432
hasOwner
505433
? `SELECT id, key, type, is_public, allowed_origins
506-
FROM "${storageModule.buckets_schema}"."${storageModule.buckets_table}"
434+
FROM ${bucketsTable}
507435
WHERE key = $1 AND owner_id = $2
508436
LIMIT 1`
509437
: `SELECT id, key, type, is_public, allowed_origins
510-
FROM "${storageModule.buckets_schema}"."${storageModule.buckets_table}"
438+
FROM ${bucketsTable}
511439
WHERE key = $1
512440
LIMIT 1`,
513441
hasOwner ? [bucketKey, ownerId] : [bucketKey],
@@ -697,9 +625,10 @@ export function createBucketProvisionerPlugin(
697625
}
698626

699627
// Read the full bucket row (post-update) to get type + origins
628+
const bucketsTable = QuoteUtils.quoteQualifiedIdentifier(storageModule.buckets_schema, storageModule.buckets_table);
700629
const bucketResult = await pgClient.query(
701630
`SELECT id, key, type, is_public, allowed_origins
702-
FROM "${storageModule.buckets_schema}"."${storageModule.buckets_table}"
631+
FROM ${bucketsTable}
703632
WHERE key = $1
704633
LIMIT 1`,
705634
[patchKey],

graphile/graphile-presigned-url-plugin/package.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@
4343
"@aws-sdk/client-s3": "^3.1009.0",
4444
"@aws-sdk/s3-request-presigner": "^3.1009.0",
4545
"@pgpmjs/logger": "workspace:^",
46+
"@pgsql/quotes": "^17.1.0",
4647
"lru-cache": "^11.2.7"
4748
},
4849
"peerDependencies": {

graphile/graphile-presigned-url-plugin/src/storage-module-cache.ts

Lines changed: 10 additions & 140 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import { Logger } from '@pgpmjs/logger';
22
import { LRUCache } from 'lru-cache';
3+
import { QuoteUtils } from '@pgsql/quotes';
34
import type { StorageModuleConfig, BucketConfig } from './types';
45

56
const log = new Logger('graphile-presigned-url:cache');
@@ -72,54 +73,6 @@ const APP_STORAGE_MODULE_QUERY = `
7273
LIMIT 1
7374
`;
7475

75-
/**
76-
* Legacy SQL query for databases without the multi-scope schema.
77-
*
78-
* Falls back to the original LIMIT 1 pattern when membership_type column
79-
* doesn't exist (pre-PR#876 schema). Returns NULL for scope-related fields
80-
* so the row shape matches StorageModuleRow.
81-
*/
82-
const LEGACY_STORAGE_MODULE_QUERY = `
83-
SELECT
84-
sm.id,
85-
NULL::int AS membership_type,
86-
NULL::uuid AS entity_table_id,
87-
bs.schema_name AS buckets_schema,
88-
bt.name AS buckets_table,
89-
fs.schema_name AS files_schema,
90-
ft.name AS files_table,
91-
urs.schema_name AS upload_requests_schema,
92-
urt.name AS upload_requests_table,
93-
sm.endpoint,
94-
sm.public_url_prefix,
95-
sm.provider,
96-
sm.allowed_origins,
97-
sm.upload_url_expiry_seconds,
98-
sm.download_url_expiry_seconds,
99-
sm.default_max_file_size,
100-
sm.max_filename_length,
101-
sm.cache_ttl_seconds,
102-
NULL AS entity_schema,
103-
NULL AS entity_table
104-
FROM metaschema_modules_public.storage_module sm
105-
JOIN metaschema_public.table bt ON bt.id = sm.buckets_table_id
106-
JOIN metaschema_public.schema bs ON bs.id = bt.schema_id
107-
JOIN metaschema_public.table ft ON ft.id = sm.files_table_id
108-
JOIN metaschema_public.schema fs ON fs.id = ft.schema_id
109-
JOIN metaschema_public.table urt ON urt.id = sm.upload_requests_table_id
110-
JOIN metaschema_public.schema urs ON urs.id = urt.schema_id
111-
WHERE sm.database_id = $1
112-
LIMIT 1
113-
`;
114-
115-
/**
116-
* Module-level flag to track whether the database schema supports multi-scope storage.
117-
*
118-
* null = not yet detected, true = new schema (has membership_type), false = legacy schema.
119-
* Once detected, avoids repeated failed queries on legacy schemas.
120-
*/
121-
let schemaSupportsMultiScope: boolean | null = null;
122-
12376
/**
12477
* SQL query to resolve ALL storage modules for a database (app-level + entity-scoped).
12578
*
@@ -190,17 +143,17 @@ function buildConfig(row: StorageModuleRow): StorageModuleConfig {
190143
const cacheTtlSeconds = row.cache_ttl_seconds ?? DEFAULT_CACHE_TTL_SECONDS;
191144
return {
192145
id: row.id,
193-
bucketsQualifiedName: `"${row.buckets_schema}"."${row.buckets_table}"`,
194-
filesQualifiedName: `"${row.files_schema}"."${row.files_table}"`,
195-
uploadRequestsQualifiedName: `"${row.upload_requests_schema}"."${row.upload_requests_table}"`,
146+
bucketsQualifiedName: QuoteUtils.quoteQualifiedIdentifier(row.buckets_schema, row.buckets_table),
147+
filesQualifiedName: QuoteUtils.quoteQualifiedIdentifier(row.files_schema, row.files_table),
148+
uploadRequestsQualifiedName: QuoteUtils.quoteQualifiedIdentifier(row.upload_requests_schema, row.upload_requests_table),
196149
schemaName: row.buckets_schema,
197150
bucketsTableName: row.buckets_table,
198151
filesTableName: row.files_table,
199152
uploadRequestsTableName: row.upload_requests_table,
200153
membershipType: row.membership_type,
201154
entityTableId: row.entity_table_id,
202155
entityQualifiedName: row.entity_schema && row.entity_table
203-
? `"${row.entity_schema}"."${row.entity_table}"`
156+
? QuoteUtils.quoteQualifiedIdentifier(row.entity_schema, row.entity_table)
204157
: null,
205158
endpoint: row.endpoint,
206159
publicUrlPrefix: row.public_url_prefix,
@@ -236,34 +189,7 @@ export async function getStorageModuleConfig(
236189

237190
log.debug(`Cache miss for app-level storage in database ${databaseId}, querying metaschema...`);
238191

239-
let result: { rows: unknown[] };
240-
241-
if (schemaSupportsMultiScope === false) {
242-
// Known legacy schema — skip the new query
243-
result = await pgClient.query({ text: LEGACY_STORAGE_MODULE_QUERY, values: [databaseId] });
244-
} else {
245-
try {
246-
// Use SAVEPOINT so a failed probe doesn't abort the surrounding transaction
247-
await pgClient.query({ text: 'SAVEPOINT storage_module_probe' });
248-
result = await pgClient.query({ text: APP_STORAGE_MODULE_QUERY, values: [databaseId] });
249-
await pgClient.query({ text: 'RELEASE SAVEPOINT storage_module_probe' });
250-
schemaSupportsMultiScope = true;
251-
} catch (err: any) {
252-
// PostgreSQL error 42703 = "column does not exist"
253-
if (err.code === '42703' || err.message?.includes('does not exist')) {
254-
log.debug('Multi-scope schema not detected, falling back to legacy query');
255-
schemaSupportsMultiScope = false;
256-
await pgClient.query({ text: 'ROLLBACK TO SAVEPOINT storage_module_probe' });
257-
await pgClient.query({ text: 'RELEASE SAVEPOINT storage_module_probe' });
258-
result = await pgClient.query({ text: LEGACY_STORAGE_MODULE_QUERY, values: [databaseId] });
259-
} else {
260-
// Release savepoint even on unexpected errors
261-
try { await pgClient.query({ text: 'ROLLBACK TO SAVEPOINT storage_module_probe' }); } catch { /* ignore */ }
262-
try { await pgClient.query({ text: 'RELEASE SAVEPOINT storage_module_probe' }); } catch { /* ignore */ }
263-
throw err;
264-
}
265-
}
266-
}
192+
const result = await pgClient.query({ text: APP_STORAGE_MODULE_QUERY, values: [databaseId] });
267193

268194
if (result.rows.length === 0) {
269195
log.warn(`No app-level storage module found for database ${databaseId}`);
@@ -297,12 +223,6 @@ export async function getStorageModuleConfigForOwner(
297223
databaseId: string,
298224
ownerId: string,
299225
): Promise<StorageModuleConfig | null> {
300-
// Entity-scoped resolution requires the multi-scope schema
301-
if (schemaSupportsMultiScope === false) {
302-
log.debug('Legacy schema detected — entity-scoped storage not available');
303-
return null;
304-
}
305-
306226
// Check if we already have a cached mapping for this ownerId
307227
const ownerCacheKey = `storage:${databaseId}:owner:${ownerId}`;
308228
const cachedOwner = storageModuleCache.get(ownerCacheKey);
@@ -324,24 +244,7 @@ export async function getStorageModuleConfigForOwner(
324244

325245
if (allConfigs.length === 0) {
326246
log.debug(`Loading all storage modules for database ${databaseId} to resolve ownerId ${ownerId}`);
327-
let result: { rows: unknown[] };
328-
try {
329-
await pgClient.query({ text: 'SAVEPOINT storage_module_probe' });
330-
result = await pgClient.query({ text: ALL_STORAGE_MODULES_QUERY, values: [databaseId] });
331-
await pgClient.query({ text: 'RELEASE SAVEPOINT storage_module_probe' });
332-
schemaSupportsMultiScope = true;
333-
} catch (err: any) {
334-
if (err.code === '42703' || err.message?.includes('does not exist')) {
335-
log.debug('Multi-scope schema not detected during owner resolution');
336-
schemaSupportsMultiScope = false;
337-
await pgClient.query({ text: 'ROLLBACK TO SAVEPOINT storage_module_probe' });
338-
await pgClient.query({ text: 'RELEASE SAVEPOINT storage_module_probe' });
339-
return null;
340-
}
341-
try { await pgClient.query({ text: 'ROLLBACK TO SAVEPOINT storage_module_probe' }); } catch { /* ignore */ }
342-
try { await pgClient.query({ text: 'RELEASE SAVEPOINT storage_module_probe' }); } catch { /* ignore */ }
343-
throw err;
344-
}
247+
const result = await pgClient.query({ text: ALL_STORAGE_MODULES_QUERY, values: [databaseId] });
345248
allConfigs = (result.rows as StorageModuleRow[]).map(buildConfig);
346249

347250
// Cache each individual config by its membership type
@@ -395,34 +298,9 @@ export async function resolveStorageModuleByFileId(
395298
// Load all storage modules for this database
396299
log.debug(`Resolving file ${fileId} across all storage modules for database ${databaseId}`);
397300

398-
let allConfigs: StorageModuleConfig[];
399-
400-
if (schemaSupportsMultiScope === false) {
401-
// Legacy schema — only one storage module, use the legacy query
402-
const result = await pgClient.query({ text: LEGACY_STORAGE_MODULE_QUERY, values: [databaseId] });
403-
allConfigs = (result.rows as StorageModuleRow[]).map(buildConfig);
404-
} else {
405-
try {
406-
await pgClient.query({ text: 'SAVEPOINT storage_module_probe' });
407-
const result = await pgClient.query({ text: ALL_STORAGE_MODULES_QUERY, values: [databaseId] });
408-
await pgClient.query({ text: 'RELEASE SAVEPOINT storage_module_probe' });
409-
schemaSupportsMultiScope = true;
410-
allConfigs = (result.rows as StorageModuleRow[]).map(buildConfig);
411-
} catch (err: any) {
412-
if (err.code === '42703' || err.message?.includes('does not exist')) {
413-
log.debug('Multi-scope schema not detected during file resolution, falling back');
414-
schemaSupportsMultiScope = false;
415-
await pgClient.query({ text: 'ROLLBACK TO SAVEPOINT storage_module_probe' });
416-
await pgClient.query({ text: 'RELEASE SAVEPOINT storage_module_probe' });
417-
const result = await pgClient.query({ text: LEGACY_STORAGE_MODULE_QUERY, values: [databaseId] });
418-
allConfigs = (result.rows as StorageModuleRow[]).map(buildConfig);
419-
} else {
420-
try { await pgClient.query({ text: 'ROLLBACK TO SAVEPOINT storage_module_probe' }); } catch { /* ignore */ }
421-
try { await pgClient.query({ text: 'RELEASE SAVEPOINT storage_module_probe' }); } catch { /* ignore */ }
422-
throw err;
423-
}
424-
}
425-
}
301+
const allConfigs = (await pgClient.query({ text: ALL_STORAGE_MODULES_QUERY, values: [databaseId] })).rows.map(
302+
(row: unknown) => buildConfig(row as StorageModuleRow),
303+
);
426304

427305
// Probe each module's files table for the fileId
428306
for (const config of allConfigs) {
@@ -470,13 +348,6 @@ const bucketCache = new LRUCache<string, BucketConfig>({
470348
* On cache miss, queries the bucket table (RLS-enforced via pgSettings on
471349
* the pgClient). On cache hit, returns the cached metadata directly.
472350
*
473-
* @param pgClient - A pg client from the Graphile context
474-
* @param storageConfig - The resolved StorageModuleConfig for this database
475-
* @param databaseId - The metaschema database UUID (used as cache key prefix)
476-
* @param bucketKey - The bucket key (e.g., "public", "private")
477-
* @returns BucketConfig or null if the bucket doesn't exist / isn't accessible
478-
*/
479-
/**
480351
* @param pgClient - A pg client from the Graphile context
481352
* @param storageConfig - The resolved StorageModuleConfig for this database/scope
482353
* @param databaseId - The metaschema database UUID (used as cache key prefix)
@@ -585,7 +456,6 @@ export function clearStorageModuleCache(): void {
585456
storageModuleCache.clear();
586457
bucketCache.clear();
587458
provisionedBuckets.clear();
588-
schemaSupportsMultiScope = null;
589459
}
590460

591461
/**

0 commit comments

Comments (0)