|
5 | 5 |
|
6 | 6 | import unittest |
7 | 7 |
|
| 8 | +from integration_tests.tokenserver.conftest import ( |
| 9 | + NODE_ID, |
| 10 | + add_node, |
| 11 | + build_oauth_headers, |
| 12 | + count_users, |
| 13 | + execute_sql, |
| 14 | + get_node, |
| 15 | + get_user, |
| 16 | +) |
8 | 17 | from integration_tests.tokenserver.test_support import TestCase |
9 | 18 | from sqlalchemy.sql import text as sqltext |
10 | 19 |
|
@@ -160,3 +169,250 @@ def test_unsuccessfully_releasing_node_capacity(self): |
160 | 169 | "status": "internal-error", |
161 | 170 | } |
162 | 171 | self.assertEqual(res.json, expected_error_response) |
| 172 | + |
| 173 | + |
| 174 | +# =========================================================================== |
| 175 | +# pytest-style equivalents — added alongside the class above. |
| 176 | +# Once confirmed passing in CI the TestNodeAssignment class will be removed. |
| 177 | +# Fixtures are defined in tools/integration_tests/tokenserver/conftest.py. |
| 178 | +# =========================================================================== |
| 179 | + |
| 180 | + |
def test_user_creation(ts_ctx):
    """A first-time user gets one record, a node assignment, and a load bump."""
    db_conn = ts_ctx["db_conn"]
    app = ts_ctx["app"]
    service_id = ts_ctx["service_id"]

    # Populate a few extra nodes with differing numbers of open slots.
    for slots, url in (
        (0, "https://node1"),
        (1, "https://node2"),
        (5, "https://node3"),
    ):
        add_node(db_conn, service_id, available=slots, node=url)

    # First request from a previously unseen user.
    headers = build_oauth_headers(
        generation=1234, keys_changed_at=1234, client_state="aaaa"
    )
    res = app.get("/1.0/sync/1.5", headers=headers)

    # Exactly one user record should now exist ...
    assert count_users(db_conn) == 1
    # ... carrying the attributes taken from the auth token.
    created = get_user(db_conn, res.json["uid"])
    assert created["generation"] == 1234
    assert created["keys_changed_at"] == 1234
    assert created["client_state"] == "aaaa"
    assert created["nodeid"] == NODE_ID
    assert created["service"] == service_id

    # Assignment consumes one 'available' slot and adds one 'current_load'
    # on the node the user landed on.
    assigned = get_node(db_conn, NODE_ID)
    assert assigned["available"] == 99
    assert assigned["current_load"] == 1

    # A repeat request from the same user must not create a second record.
    app.get("/1.0/sync/1.5", headers=headers)
    assert count_users(db_conn) == 1
| 215 | + |
| 216 | + |
def test_new_user_allocation(ts_ctx):
    """A new user lands on the least-loaded node that is neither backed off nor downed."""
    db_conn = ts_ctx["db_conn"]
    app = ts_ctx["app"]
    service_id = ts_ctx["service_id"]

    # Start with a clean database.
    execute_sql(db_conn, sqltext("DELETE FROM nodes"), {}).close()

    # Candidate nodes: node1 is backing off, node2 is downed, node3-5 are
    # healthy with increasing load ratios.
    specs = [
        dict(available=100, current_load=0, capacity=100, backoff=1,
             node="https://node1"),
        dict(available=100, current_load=0, capacity=100, downed=1,
             node="https://node2"),
        dict(available=99, current_load=1, capacity=100,
             node="https://node3"),
        dict(available=98, current_load=2, capacity=100,
             node="https://node4"),
        dict(available=97, current_load=3, capacity=100,
             node="https://node5"),
    ]
    created_ids = [add_node(db_conn, service_id, **spec) for spec in specs]
    expected_node_id = created_ids[2]  # https://node3

    headers = build_oauth_headers(
        generation=1234, keys_changed_at=1234, client_state="aaaa"
    )
    res = app.get("/1.0/sync/1.5", headers=headers)

    # The user should have been allocated to the least-loaded node
    # (computed as current_load / capacity) that has backoff and downed
    # set to 0.
    user = get_user(db_conn, res.json["uid"])
    assert user["nodeid"] == expected_node_id

    # The selected node should have current_load incremented and
    # available decremented.
    chosen = get_node(db_conn, expected_node_id)
    assert chosen["current_load"] == 2
    assert chosen["available"] == 98
| 283 | + |
| 284 | + |
def test_successfully_releasing_node_capacity(ts_ctx):
    """Test successfully releasing node capacity.

    When every node has 0 available slots, the server releases capacity on
    each non-downed node per min(capacity * capacity_release_rate,
    capacity - current_load), then assigns the user to the least-loaded
    eligible node.
    """
    db_conn = ts_ctx["db_conn"]
    app = ts_ctx["app"]
    service_id = ts_ctx["service_id"]

    # Start with a clean database
    execute_sql(db_conn, sqltext("DELETE FROM nodes"), {}).close()

    node_id1 = add_node(
        db_conn,
        service_id,
        available=0,
        current_load=99,
        capacity=100,
        node="https://node1",
    )
    node_id2 = add_node(
        db_conn,
        service_id,
        available=0,
        current_load=90,
        capacity=100,
        node="https://node2",
    )
    node_id3 = add_node(
        db_conn,
        service_id,
        available=0,
        current_load=80,
        capacity=81,
        node="https://node3",
    )
    node_id4 = add_node(
        db_conn,
        service_id,
        available=0,
        current_load=70,
        capacity=71,
        backoff=1,
        node="https://node4",
    )
    node_id5 = add_node(
        db_conn,
        service_id,
        available=0,
        current_load=60,
        capacity=61,
        downed=1,
        node="https://node5",
    )

    headers = build_oauth_headers(
        generation=1234, keys_changed_at=1234, client_state="aaaa"
    )
    res = app.get("/1.0/sync/1.5", headers=headers)
    # Since every node has no available spots, capacity is added to each
    # node according to the equation
    # min(capacity*capacity_release_rate, capacity - current_load). Since
    # capacity - current_load is 0 for every node, the node with the
    # greatest capacity is chosen
    user = get_user(db_conn, res.json["uid"])
    assert user["nodeid"] == node_id2
    # min(100 * 0.1, 100 - 99) = 1
    node1 = get_node(db_conn, node_id1)
    assert node1["available"] == 1
    # min(100 * 0.1, 100 - 90) = 10, and this is the node to which the
    # user was assigned, so the final available count is 9
    node2 = get_node(db_conn, node_id2)
    assert node2["available"] == 9
    # min(81 * 0.1, 81 - 80) = 1
    node3 = get_node(db_conn, node_id3)
    assert node3["available"] == 1
    # min(71 * 0.1, 71 - 70) = 1 (fixed: comment previously said 100 * 0.1,
    # but node4's capacity is 71)
    node4 = get_node(db_conn, node_id4)
    assert node4["available"] == 1
    # Nodes with downed set to 1 do not have their availability updated
    node5 = get_node(db_conn, node_id5)
    assert node5["available"] == 0
| 366 | + |
| 367 | + |
def test_unsuccessfully_releasing_node_capacity(ts_ctx):
    """A 503 with an internal-error body is returned when no node has room."""
    db_conn = ts_ctx["db_conn"]
    app = ts_ctx["app"]
    service_id = ts_ctx["service_id"]

    # Start from an empty nodes table.
    execute_sql(db_conn, sqltext("DELETE FROM nodes"), {}).close()

    # Every node is fully saturated (current_load == capacity and zero
    # availability), so the server cannot release any capacity.
    for cap, url in (
        (100, "https://node1"),
        (90, "https://node2"),
        (80, "https://node3"),
    ):
        add_node(
            db_conn,
            service_id,
            available=0,
            current_load=cap,
            capacity=cap,
            node=url,
        )

    headers = build_oauth_headers(
        generation=1234, keys_changed_at=1234, client_state="aaaa"
    )
    # All of these nodes are completely full, and no capacity can be released
    res = app.get("/1.0/sync/1.5", headers=headers, status=503)

    # The response carries the expected internal-error body.
    assert res.json == {
        "status": "internal-error",
        "errors": [
            {
                "description": "Unexpected error: unable to get a node",
                "location": "internal",
                "name": "",
            }
        ],
    }
0 commit comments