@@ -829,11 +829,11 @@ var row = new Dictionary<string, object>
829829 };
830830
831831// Serialization:
832- // ColumnCount (4) + "UserId" metadata (20) + 4 bytes (int32)
833- // + "Biography" metadata (30) + 4100 bytes (string data)
834- // ≈ 4 + 20 + 4 + 30 + 4100 = 4158 bytes
832+ // ColumnCount (4) + UserId metadata (15) + Name metadata (21)
833+ // + Biography metadata (4030) + 4100 bytes (string data)
834+ // ≈ 4 + 15 + 21 + 4030 + 4100 = 8170 bytes
835835//
836- // Result: 4158 > 4096 (page size)
836+ // Result: 8170 > 4096 (page size)
837837// ❌ ERROR! Record too large for page!
838838```
839839
@@ -1156,12 +1156,102 @@ catch (InvalidOperationException ex)
11561156 // Serialized size is 4158, but max is 4056!
11571157
11581158 Console.WriteLine(ex.Message);
1159+ // FIX: Increase page size BEFORE inserting large records
11591160 }
11601161
11611162// Code that causes this:
11621163// if (recordData.Length > MAX_RECORD_SIZE) // MAX_RECORD_SIZE ≈ 4056
11631164// return Error("Record too large for page");
11641165
1166+ // ⚠️ IMPORTANT: Page size is FIXED at database creation time
1167+ // You CANNOT change it dynamically after creation.
1168+ // All existing pages, FSM, and Block Registry depend on it.
1169+ // Changing page size requires complete database migration.
1170+ ```
1171+
1172+ **Best Practice: Pre-calculate and Plan Page Size**
1173+
1174+ ```csharp
1175+ // BEFORE creating database, estimate your largest record:
1176+
1177+ var largestExpectedRow = new Dictionary<string, object>
1178+ {
1179+     ["UserId"] = 1,
1180+     ["Name"] = "John Doe",
1181+     ["Description"] = "Some description...",
1182+     ["LargeText"] = new string('X', 5000), // Large column
1183+ };
1184+
1185+ // Estimate serialized size:
1186+ // ColumnCount(4) + UserId metadata(15) + Name metadata(21)
1187+ // + Description metadata(30) + LargeText metadata(4030) ≈ 4100 bytes
1188+
1189+ // Since 4100 > 4056 (max for 4KB page), choose larger page:
1190+ var options = new DatabaseOptions
1191+ {
1192+     PageSize = 8192, // 8KB page → 8152 bytes available ✅
1193+     CreateImmediately = true,
1194+ };
1195+
1196+ var db = new SharpCoreDB(options);
1197+
1198+ // Now large records will work!
1199+ db.InsertRecord(largestExpectedRow); // ✅ Success
1200+ ```
1201+
1202+ **Why Dynamic Page Size Isn't Practical**
1203+
1204+ SharpCoreDB's architecture makes dynamic page resizing impossible without complete database migration:
1205+
1206+ 1. **File Header**: Page size is stored once, read at startup
1207+ 2. **FSM (Free Space Map)**: Bitmap assumes fixed page size
1208+ 3. **Block Registry**: Offsets are multiples of current page size
1209+ 4. **Existing Records**: All stored with current page boundaries
1210+
1211+ Changing page size would require:
1212+ - ❌ Reading every page from disk
1213+ - ❌ Recalculating all offsets
1214+ - ❌ Rebuilding entire FSM
1215+ - ❌ Rewriting entire file
1216+
1217+ **Solution: Design Your Schema Appropriately**
1218+
1219+ ```csharp
1220+ // ❌ DON'T: Store entire biography in one record
1221+ var row = new Dictionary<string, object>
1222+ {
1223+     ["UserId"] = 1,
1224+     ["Biography"] = new string('X', 100_000), // 100KB!
1225+ };
1226+
1227+ // ✅ DO: Split into manageable pieces
1228+ var userRecord = new Dictionary<string, object>
1229+ {
1230+     ["UserId"] = 1,
1231+     ["Name"] = "John Doe",
1232+ };
1233+
1234+ var bioChunk1 = new Dictionary<string, object>
1235+ {
1236+     ["UserId"] = 1,
1237+     ["BioPart"] = 1,
1238+     ["Content"] = "First part...", // ~2KB
1239+ };
1240+
1241+ var bioChunk2 = new Dictionary<string, object>
1242+ {
1243+     ["UserId"] = 1,
1244+     ["BioPart"] = 2,
1245+     ["Content"] = "Second part...", // ~2KB
1246+ };
1247+
1248+ // OR: Store reference + external file
1249+ var userWithRef = new Dictionary<string, object>
1250+ {
1251+     ["UserId"] = 1,
1252+     ["BioFileRef"] = "user_1_bio.txt", // Small reference
1253+ };
1254+
11651255
11661256
11671257
0 commit comments