Skip to content

Commit 51eb5fd

Browse files
committed
update test purge old records
1 parent 2cc69d5 commit 51eb5fd

1 file changed

Lines changed: 358 additions & 0 deletions

File tree

tools/tokenserver/test_purge_old_records.py

Lines changed: 358 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -339,3 +339,361 @@ def test_purging_override_null_keys_changed_at(self):
339339
self.assertEqual(len(user_records), 1)
340340
self.assertEqual(user_records[0].node, self.spanner_node)
341341
self.assertEqual(len(self.service_requests), 0)
342+
343+
344+
# ===========================================================================
345+
# pytest-style equivalents — added alongside the classes above.
346+
# Once confirmed passing in CI the unittest classes will be removed.
347+
# Module-scoped WSGI server fixtures are defined here (file-local) because
348+
# they are only needed by this module.
349+
# ===========================================================================
350+
351+
352+
def _make_service_app(service_requests):
353+
"""Return a WSGI app that records each request into the given list."""
354+
355+
def _service_app(environ, start_response):
356+
service_requests.append(environ)
357+
start_response("200 OK", [])
358+
return ""
359+
360+
return _service_app
361+
362+
363+
@pytest.fixture(scope="module")
def mock_service_server():
    """Start one recording WSGI server for the whole module.

    Module scope is deliberate: binding a port and spawning a thread is
    comparatively expensive, and the server itself holds no per-test
    state — the shared ``requests`` list is emptied in each per-test
    fixture's teardown.

    Yields a dict with the node URL and the list of recorded environs.
    """
    recorded = []
    srv = make_server("localhost", 0, _make_service_app(recorded))
    # Suppress the default per-request log lines on stderr.
    srv.RequestHandlerClass.log_request = lambda *a: None
    addr, port = srv.server_address
    worker = threading.Thread(target=srv.serve_forever)
    worker.daemon = True
    worker.start()
    yield {"node": f"http://{addr}:{port}", "requests": recorded}
    srv.shutdown()
    worker.join()
382+
383+
384+
@pytest.fixture(scope="module")
def mock_spanner_server(mock_service_server):
    """Start a module-scoped "spanner" WSGI server for migration tests.

    Module scope for the same reason as ``mock_service_server``. The
    spanner server appends into the *same* requests list as the service
    server, mirroring the original test-class behaviour where both
    servers shared one list.

    Yields a dict with the live spanner node URL and the URL of a node
    that is never served (used to simulate a downed node).
    """
    recorded = mock_service_server["requests"]
    srv = make_server("localhost", 0, _make_service_app(recorded))
    # Suppress the default per-request log lines on stderr.
    srv.RequestHandlerClass.log_request = lambda *a: None
    addr, port = srv.server_address
    worker = threading.Thread(target=srv.serve_forever)
    worker.daemon = True
    worker.start()
    yield {
        "node": f"http://{addr}:{port}",
        # Port 9999 is intentionally not listening anywhere.
        "downed_node": f"http://{addr}:9999",
    }
    srv.shutdown()
    worker.join()
404+
405+
406+
@pytest.fixture(scope="function")
def purge_db(mock_service_server):
    """Per-test Database seeded with the mock service node.

    Wipes users/nodes/services before and after each test, registers the
    sync-1.5 service plus the mock service node, and empties the shared
    request-recording list on teardown.
    """
    db = Database()
    for table in ("users", "nodes", "services"):
        db._execute_sql(f"DELETE FROM {table}").close()
    db.add_service("sync-1.5", r"{node}/1.5/{uid}")
    db.add_node(mock_service_server["node"], 100)
    yield db
    for table in ("users", "nodes", "services"):
        db._execute_sql(f"DELETE FROM {table}").close()
    # Reset the module-scoped request log for the next test.
    del mock_service_server["requests"][:]
    db.close()
421+
422+
423+
@pytest.fixture(scope="function")
def migration_db(mock_service_server, mock_spanner_server):
    """Per-test Database seeded with service, spanner, and downed nodes.

    Same lifecycle as ``purge_db`` but additionally registers the
    spanner node and a node marked down, as needed by the
    migration-records tests.
    """
    db = Database()
    for table in ("users", "nodes", "services"):
        db._execute_sql(f"DELETE FROM {table}").close()
    db.add_service("sync-1.5", r"{node}/1.5/{uid}")
    db.add_node(mock_service_server["node"], 100)
    db.add_node(mock_spanner_server["downed_node"], 100, downed=True)
    db.add_node(mock_spanner_server["node"], 100)
    yield db
    for table in ("users", "nodes", "services"):
        db._execute_sql(f"DELETE FROM {table}").close()
    # Reset the module-scoped request log for the next test.
    del mock_service_server["requests"][:]
    db.close()
440+
441+
442+
def test_purging_of_old_user_records(purge_db, mock_service_server):
    """Old (replaced) user records are purged and deletes sent upstream.

    Exercises the full happy path: three records exist for one user; the
    default grace period protects them all, grace_period=0 purges the
    two stale ones, and each purge issues a correctly-signed DELETE to
    the storage service. The user's active record is untouched.
    """
    database = purge_db
    service_requests = mock_service_server["requests"]

    # Make some old user records.
    email = "test@mozilla.com"
    user = database.allocate_user(email, client_state="aa", generation=123)
    database.update_user(user, client_state="bb", generation=456, keys_changed_at=450)
    database.update_user(user, client_state="cc", generation=789)
    user_records = list(database.get_user_records(email))
    assert len(user_records) == 3
    user = database.get_user(email)
    assert user["client_state"] == "cc"
    assert len(user["old_client_states"]) == 2

    # The default grace-period should prevent any cleanup.
    node_secret = "SECRET"
    assert purge_old_records(node_secret)
    user_records = list(database.get_user_records(email))
    assert len(user_records) == 3
    assert len(service_requests) == 0

    # With no grace period, we should cleanup two old records.
    assert purge_old_records(node_secret, grace_period=0)
    user_records = list(database.get_user_records(email))
    assert len(user_records) == 1
    assert len(service_requests) == 2

    # Check that the proper delete requests were made to the service.
    expected_kids = ["0000000000450-uw", "0000000000123-qg"]
    for i, environ in enumerate(service_requests):
        # They must be to the correct path.
        assert environ["REQUEST_METHOD"] == "DELETE"
        # Escaped dot: "/1.5/" literally, not "/1<any>5/".
        assert re.match(r"/1\.5/[0-9]+", environ["PATH_INFO"])
        # They must have a correct request signature.
        token = hawkauthlib.get_id(environ)
        secret = tokenlib.get_derived_secret(token, secret=node_secret)
        assert hawkauthlib.check_signature(environ, secret)
        userdata = tokenlib.parse_token(token, secret=node_secret)
        assert "uid" in userdata
        assert "node" in userdata
        assert userdata["fxa_uid"] == "test"
        assert userdata["fxa_kid"] == expected_kids[i]

    # Check that the user's current state is unaffected
    user = database.get_user(email)
    assert user["client_state"] == "cc"
    assert len(user["old_client_states"]) == 0
491+
492+
493+
def test_purging_is_not_done_on_downed_nodes(purge_db, mock_service_server):
    """Records on a downed node are skipped until the node is back up."""
    db = purge_db
    node = mock_service_server["node"]
    requests = mock_service_server["requests"]
    secret = "SECRET"
    email = "test@mozilla.com"
    account = db.allocate_user(email, client_state="aa")
    db.update_user(account, client_state="bb")
    assert len(list(db.get_user_records(email))) == 2

    # While the node is marked down, nothing may be purged.
    db.update_node(node, downed=1)
    assert purge_old_records(secret, grace_period=0)
    assert len(list(db.get_user_records(email))) == 2
    assert len(requests) == 0

    # Once the node is back up, the stale record is purged as usual.
    db.update_node(node, downed=0)
    assert purge_old_records(secret, grace_period=0)
    assert len(list(db.get_user_records(email))) == 1
    assert len(requests) == 1
518+
519+
520+
def test_force(purge_db, mock_service_server):
    """force=True purges records even when their node is marked down."""
    db = purge_db
    node = mock_service_server["node"]
    requests = mock_service_server["requests"]
    secret = "SECRET"
    email = "test@mozilla.com"
    account = db.allocate_user(email, client_state="aa")
    db.update_user(account, client_state="bb")
    assert len(list(db.get_user_records(email))) == 2

    # Downed node + force=True: the purge proceeds anyway.
    db.update_node(node, downed=1)
    assert purge_old_records(secret, grace_period=0, force=True)

    assert len(list(db.get_user_records(email))) == 1
    assert len(requests) == 1
539+
540+
541+
def test_dry_run(purge_db, mock_service_server):
    """dryrun=True reports success but performs no destructive work."""
    db = purge_db
    node = mock_service_server["node"]
    requests = mock_service_server["requests"]
    secret = "SECRET"
    email = "test@mozilla.com"
    account = db.allocate_user(email, client_state="aa")
    db.update_user(account, client_state="bb")
    assert len(list(db.get_user_records(email))) == 2

    db.update_node(node, downed=1)

    # A dry run must leave both the database and the service untouched.
    assert purge_old_records(secret, grace_period=0, dryrun=True)

    assert len(list(db.get_user_records(email))) == 2
    assert len(requests) == 0
561+
562+
563+
@pytest.mark.migration_records
def test_purging_replaced_at(migration_db, mock_service_server, mock_spanner_server):
    """A replaced record with no successor is purged and its data deleted."""
    db = migration_db
    requests = mock_service_server["requests"]
    secret = "SECRET"
    email = "test@mozilla.com"
    account = db.allocate_user(email, client_state="aa")
    db.replace_user_record(account["uid"])

    assert purge_old_records(secret, grace_period=0)
    assert len(list(db.get_user_records(email))) == 0
    assert len(requests) == 1
577+
578+
579+
@pytest.mark.migration_records
def test_purging_no_override(migration_db, mock_service_server, mock_spanner_server):
    """Without override_node, a replaced record is purged normally."""
    db = migration_db
    requests = mock_service_server["requests"]
    spanner = mock_spanner_server["node"]
    secret = "SECRET"
    email = "test@mozilla.com"
    account = db.allocate_user(email, client_state="aa")
    db.replace_user_record(account["uid"])
    # The user's active record now lives on the spanner node.
    db.allocate_user(email, node=spanner, client_state="aa")

    assert purge_old_records(secret, grace_period=0)
    assert len(list(db.get_user_records(email))) == 1
    assert len(requests) == 1
595+
596+
597+
@pytest.mark.migration_records
def test_purging_override_with_migrated(
    migration_db, mock_service_server, mock_spanner_server
):
    """A migrated user's stale record is purged without deleting live data."""
    db = migration_db
    requests = mock_service_server["requests"]
    spanner = mock_spanner_server["node"]
    downed = mock_spanner_server["downed_node"]
    secret = "SECRET"
    email = "test@mozilla.com"

    # User previously lived on a node that is now down.
    account = db.allocate_user(email, node=downed, client_state="aa")
    # Simulate the Spanner migration process (mark their original record as
    # replaced_at):
    # https://github.com/mozilla-services/cloudops-docs/blob/389e61f/Services/Durable%20Sync/SYNC-PY-MIGRATION.md#migration-steps

    # The process copied their data to spanner with no change to their
    # generation/client_state.
    db.replace_user_record(account["uid"])
    # Migration finished: the user's active record now points to Spanner.
    db.allocate_user(email, node=spanner, client_state="aa")

    assert purge_old_records(
        secret, grace_period=0, force=True, override_node=spanner
    )
    records = list(db.get_user_records(email))
    # The stale downed-node record was purged from the database...
    assert len(records) == 1
    assert records[0].node == spanner
    # ...but since it had the same generation/client_state as the active
    # spanner record, a naive forced delete to the spanner node would wipe
    # the user's current data. force/override_node must detect this case
    # and skip the delete request entirely.
    assert len(requests) == 0
634+
635+
636+
@pytest.mark.migration_records
def test_purging_override_with_migrated_password_change(
    migration_db, mock_service_server, mock_spanner_server
):
    """After a password change, both stale records get real delete requests."""
    db = migration_db
    requests = mock_service_server["requests"]
    spanner = mock_spanner_server["node"]
    downed = mock_spanner_server["downed_node"]
    secret = "SECRET"
    email = "test@mozilla.com"

    # A user migrated to spanner (like test_purging_override_with_migrated).
    account = db.allocate_user(email, node=downed, client_state="aa")
    db.replace_user_record(account["uid"])
    account = db.allocate_user(email, node=spanner, client_state="aa")
    # The user then changes their password.
    db.update_user(account, client_state="ab")

    assert purge_old_records(
        secret, grace_period=0, force=True, override_node=spanner
    )
    assert len(list(db.get_user_records(email))) == 1
    # Neither replaced_at record matches the active record any more, so
    # both issue delete requests as normal.
    assert len(requests) == 2
663+
664+
665+
@pytest.mark.migration_records
def test_purging_override_null_keys_changed_at(
    migration_db, mock_service_server, mock_spanner_server
):
    """Like test_purging_override_with_migrated, but keys_changed_at is NULL."""
    db = migration_db
    requests = mock_service_server["requests"]
    spanner = mock_spanner_server["node"]
    downed = mock_spanner_server["downed_node"]
    secret = "SECRET"
    email = "test@mozilla.com"

    account = db.allocate_user(
        email,
        node=downed,
        client_state="aa",
        keys_changed_at=None,
    )
    db.replace_user_record(account["uid"])
    db.allocate_user(
        email,
        node=spanner,
        client_state="aa",
        keys_changed_at=None,
    )

    assert purge_old_records(
        secret, grace_period=0, force=True, override_node=spanner
    )
    records = list(db.get_user_records(email))
    assert len(records) == 1
    assert records[0].node == spanner
    # The stale record is identical to the active one, so no delete
    # request may be issued against the spanner node.
    assert len(requests) == 0

0 commit comments

Comments
 (0)