@@ -9,20 +9,32 @@
  * 2. Upload fields — adds `requestUploadUrl` and `requestBulkUploadUrls` fields
  *    on `@storageBuckets`-tagged types, so clients upload via the typed bucket API.
  *
- * 3. downloadUrl — handled by download-url-field.ts (separate plugin).
+ * 3. Mutation entry points — adds per-bucket mutation fields on the root Mutation
+ *    type (e.g., `appBucket(key: "public"): AppBucket`), so upload operations
+ *    can be accessed as proper GraphQL mutations instead of queries.
  *
- * No global mutations — all S3 operations are scoped to the per-table types that
- * PostGraphile already generates. Scope resolution uses the codec's schema/table
- * name matched against cached storage module configs.
+ * 4. downloadUrl — handled by download-url-field.ts (separate plugin).
+ *
+ * Scope resolution uses the codec's schema/table name matched against
+ * cached storage module configs.
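+ *
+ * Example shape of a client call using the entry points from (3). This is
+ * a sketch; the actual field selections come from the bucket upload API in (2):
+ *
+ *   mutation {
+ *     appBucket(key: "public") {
+ *       # e.g. requestUploadUrl / requestBulkUploadUrls selections
+ *     }
+ *   }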
  */
 
-import { context as grafastContext, lambda, object } from 'grafast';
+import { access, context as grafastContext, lambda, object } from 'grafast';
 import type { GraphileConfig } from 'graphile-config';
 import 'graphile-build';
 import { Logger } from '@pgpmjs/logger';
 
 import type { PresignedUrlPluginOptions, S3Config, StorageModuleConfig, BucketConfig } from './types';
-import { loadAllStorageModules, resolveStorageConfigFromCodec, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
+import { loadAllStorageModules, resolveStorageConfigFromCodec, getBucketConfig, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
 import { generatePresignedPutUrl, deleteS3Object } from './s3-signer';
 
 const log = new Logger('graphile-presigned-url:plugin');
@@ -145,9 +148,104 @@ export function createPresignedUrlPlugin(
        */
       GraphQLObjectType_fields(fields, build, context) {
         const {
-          scope: { pgCodec, isPgClassType },
+          scope: { pgCodec, isPgClassType, isRootMutation },
         } = context as any;
 
+        // --- Path 1: Add per-bucket mutation entry points on root Mutation ---
+        if (isRootMutation) {
+          const {
+            graphql: { GraphQLString, GraphQLNonNull },
+          } = build;
+
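+          // Collect every table codec carrying the @storageBuckets smart tag.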
+          const bucketCodecs = Object.values((build.input as any).pgRegistry.pgCodecs).filter(
+            (codec: any) => codec.attributes && (codec.extensions as any)?.tags?.storageBuckets,
+          );
+
+          if (bucketCodecs.length === 0) return fields;
+
+          const newFields: Record<string, any> = {};
+          for (const codec of bucketCodecs as any[]) {
+            const typeName = (build.inflection as any).tableType(codec);
+            const bucketType = build.getTypeByName(typeName);
+            if (!bucketType) {
+              log.debug(`Skipping mutation entry point for ${codec.name}: type ${typeName} not found`);
+              continue;
+            }
+
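+            // Field name is the lower-camelCase type name, e.g. AppBucket -> appBucket.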
+            const fieldName = typeName.charAt(0).toLowerCase() + typeName.slice(1);
+            const hasOwnerId = !!codec.attributes.owner_id;
+            const capturedCodec = codec;
+
+            log.debug(`Adding mutation entry point "${fieldName}" for bucket type ${typeName} (entity-scoped=${hasOwnerId})`);
+
+            newFields[fieldName] = context.fieldWithHooks(
+              { fieldName } as any,
+              {
+                description: `Look up a ${typeName} by key for mutation operations (upload, etc.).`,
+                type: bucketType,
+                args: {
+                  key: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
+                  ...(hasOwnerId
+                    ? { ownerId: { type: new GraphQLNonNull(GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
+                    : {}),
+                },
+                plan(_$mutation: any, fieldArgs: any) {
+                  const $key = fieldArgs.getRaw('key');
+                  const $ownerId = hasOwnerId ? fieldArgs.getRaw('ownerId') : lambda(null, (): null => null);
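+                  // Pull the pg client factory and settings from the GraphQL context.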
+                  const $withPgClient = (grafastContext() as any).get('withPgClient');
+                  const $pgSettings = (grafastContext() as any).get('pgSettings');
+
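+                  // Bundle the args and pg context into one input step for the lambda below.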
+                  const $combined = object({
+                    key: $key,
+                    ownerId: $ownerId,
+                    withPgClient: $withPgClient,
+                    pgSettings: $pgSettings,
+                  });
+
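+                  // At execution time: resolve the database, load its storage module
+                  // configs, and look up the requested bucket (owner-scoped when given).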
+                  const $row = lambda($combined, async (vals: any) => {
+                    return vals.withPgClient(vals.pgSettings, async (pgClient: any) => {
+                      const databaseId = await resolveDatabaseId(pgClient);
+                      if (!databaseId) throw new Error('DATABASE_NOT_FOUND');
+
+                      const allConfigs = await loadAllStorageModules(pgClient, databaseId);
+                      const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+                      if (!storageConfig) throw new Error('STORAGE_MODULE_NOT_FOUND');
+
+                      const bucket = await getBucketConfig(
+                        pgClient, storageConfig, databaseId, vals.key, vals.ownerId ?? undefined,
+                      );
+                      if (!bucket) throw new Error('BUCKET_NOT_FOUND');
+
+                      return bucket;
+                    });
+                  });
+
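+                  // Expose each column of the resolved bucket row as its own step so the
+                  // bucket type's fields can read individual attributes.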
+                  const columnEntries: Record<string, any> = {};
+                  for (const col of Object.keys(capturedCodec.attributes)) {
+                    columnEntries[col] = access($row, col);
+                  }
+                  return object(columnEntries);
+                },
+              },
+            );
+          }
+
+          return build.extend(
+            fields,
+            newFields,
+            'PresignedUrlPlugin adding per-bucket mutation entry points',
+          );
+        }
+
+        // --- Path 2: Add upload fields on @storageBuckets types ---
         if (!isPgClassType || !pgCodec || !pgCodec.attributes) {
           return fields;
         }