@@ -290,3 +290,168 @@ pub async fn get_quirks(pool: &PgPool, unit_id: i32) -> Result<Vec<DbQuirk>, App
290290 . await ?;
291291 Ok ( rows)
292292}
293+
#[cfg(test)]
mod tests {
    use super::*;

    /// Insert the shared test chassis row and return its auto-generated ID.
    ///
    /// Both seeding paths need the identical chassis; centralising the
    /// INSERT + id lookup keeps the fixtures from drifting apart.
    async fn seed_chassis(pool: &PgPool) -> i32 {
        sqlx::query(
            "INSERT INTO unit_chassis (slug, name, unit_type, tech_base, tonnage)
             VALUES ('test-mech', 'Test', 'BattleMech', 'inner_sphere', 75)",
        )
        .execute(pool)
        .await
        .unwrap();

        sqlx::query_scalar("SELECT id FROM unit_chassis WHERE slug = 'test-mech'")
            .fetch_one(pool)
            .await
            .unwrap()
    }

    /// Insert units whose auto-increment IDs do NOT match alphabetical name
    /// order — the exact scenario that triggered the keyset pagination bug.
    async fn seed_units(pool: &PgPool) {
        let chassis_id = seed_chassis(pool).await;

        // Inserted in reverse-alpha order → id 1=Zephyr … id 5=Atlas.
        let units = [
            ("zephyr-zph-1", "ZPH-1", "Zephyr ZPH-1"),
            ("dragon-drg-1n", "DRG-1N", "Dragon DRG-1N"),
            ("centurion-cn9-a", "CN9-A", "Centurion CN9-A"),
            ("banshee-bnc-3e", "BNC-3E", "Banshee BNC-3E"),
            ("atlas-as7-d", "AS7-D", "Atlas AS7-D"),
        ];
        for (slug, variant, full_name) in &units {
            sqlx::query(
                "INSERT INTO units (slug, chassis_id, variant, full_name, tech_base, rules_level, tonnage)
                 VALUES ($1, $2, $3, $4, 'inner_sphere', 'standard', 75)",
            )
            .bind(slug)
            .bind(chassis_id)
            .bind(variant)
            .bind(full_name)
            .execute(pool)
            .await
            .unwrap();
        }
    }

    /// A filter that matches every unit — pagination tests exercise the
    /// cursor logic, not the filtering.
    fn empty_filter() -> UnitFilter<'static> {
        UnitFilter {
            name_search: None,
            tech_base: None,
            rules_level: None,
            tonnage_min: None,
            tonnage_max: None,
            faction_slug: None,
            era_slug: None,
            is_omnimech: None,
            config: None,
            engine_type: None,
            has_jump: None,
            role: None,
        }
    }

    /// Regression: pagination must not produce duplicates or skip items when
    /// DB row IDs don't match the alphabetical sort order.
    #[sqlx::test(migrations = "../../migrations")]
    async fn keyset_pagination_no_duplicates_or_gaps(pool: PgPool) {
        seed_units(&pool).await;

        let mut all_names: Vec<String> = vec![];
        let mut cursor: Option<(String, i32)> = None;
        let mut pages = 0;

        loop {
            // Fail fast instead of hanging the suite if a regression leaves
            // `has_next` permanently true (e.g. a cursor that never advances).
            assert!(pages < 10, "pagination did not terminate");

            let cursor_ref = cursor.as_ref().map(|(s, id)| (s.as_str(), *id));
            let (rows, total, has_next) =
                search(&pool, empty_filter(), 2, cursor_ref).await.unwrap();

            assert_eq!(total, 5, "totalCount must be stable across all pages");

            for row in &rows {
                all_names.push(row.full_name.clone());
            }

            pages += 1;
            if !has_next {
                break;
            }
            let last = rows.last().unwrap();
            cursor = Some((last.full_name.clone(), last.id));
        }

        assert_eq!(pages, 3, "5 items / page size 2 = 3 pages");
        assert_eq!(
            all_names,
            ["Atlas AS7-D", "Banshee BNC-3E", "Centurion CN9-A", "Dragon DRG-1N", "Zephyr ZPH-1"],
            "items must appear in alphabetical order with no duplicates or gaps"
        );
    }

    /// Regression: totalCount must reflect all filtered rows, not just rows
    /// remaining after the cursor position.
    #[sqlx::test(migrations = "../../migrations")]
    async fn total_count_stable_across_pages(pool: PgPool) {
        seed_units(&pool).await;

        let (page1, total1, _) = search(&pool, empty_filter(), 2, None).await.unwrap();
        let last = page1.last().unwrap();
        let cursor = (last.full_name.as_str(), last.id);

        let (_, total2, _) = search(&pool, empty_filter(), 2, Some(cursor)).await.unwrap();

        assert_eq!(total1, total2, "totalCount must not change between pages");
    }

    /// Edge case: items with the same name must paginate correctly using the
    /// ID tiebreaker.
    #[sqlx::test(migrations = "../../migrations")]
    async fn pagination_with_duplicate_names(pool: PgPool) {
        let chassis_id = seed_chassis(&pool).await;

        // Three units sharing one full_name — only the ID can break the tie.
        for i in 1..=3 {
            sqlx::query(
                "INSERT INTO units (slug, chassis_id, variant, full_name, tech_base, rules_level, tonnage)
                 VALUES ($1, $2, $3, 'Same Name', 'inner_sphere', 'standard', 75)",
            )
            .bind(format!("same-name-{i}"))
            .bind(chassis_id)
            .bind(format!("V{i}"))
            .execute(&pool)
            .await
            .unwrap();
        }

        let (page1, _, has_next) = search(&pool, empty_filter(), 2, None).await.unwrap();
        assert!(has_next);
        assert_eq!(page1.len(), 2);

        let last = page1.last().unwrap();
        let cursor = (last.full_name.as_str(), last.id);
        let (page2, _, has_next2) = search(&pool, empty_filter(), 2, Some(cursor)).await.unwrap();
        assert!(!has_next2);
        assert_eq!(page2.len(), 1);

        let all_ids: Vec<i32> = page1.iter().chain(page2.iter()).map(|u| u.id).collect();
        assert_eq!(all_ids.len(), 3);
        let unique: std::collections::HashSet<i32> = all_ids.iter().copied().collect();
        assert_eq!(unique.len(), 3, "no duplicate IDs");
        assert!(
            all_ids.windows(2).all(|w| w[0] < w[1]),
            "IDs must be ascending when names are equal"
        );
    }
}
0 commit comments