Skip to content

Commit 262448a

Browse files
Abel Milash and claude committed
Improve chunking test coverage and walkthrough section order
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent 5ad6d4c commit 262448a

2 files changed

Lines changed: 95 additions & 72 deletions

File tree

examples/advanced/walkthrough.py

Lines changed: 49 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -228,45 +228,15 @@ def _run_walkthrough(client):
228228
print(f"[OK] Updated {len(ids)} records to new_Completed=True")
229229

230230
# ============================================================================
231-
# 6. PAGING DEMO
232-
# ============================================================================
233-
print("\n" + "=" * 80)
234-
print("6. Paging Demo")
235-
print("=" * 80)
236-
237-
# Create 20 records for paging
238-
log_call(f"client.records.create('{table_name}', [20 records])")
239-
paging_records = [
240-
{
241-
"new_Title": f"Paging test item {i}",
242-
"new_Quantity": i,
243-
"new_Amount": i * 10.0,
244-
"new_Completed": False,
245-
"new_Priority": Priority.LOW,
246-
}
247-
for i in range(1, 21)
248-
]
249-
paging_ids = backoff(lambda: client.records.create(table_name, paging_records))
250-
print(f"[OK] Created {len(paging_ids)} records for paging demo")
251-
252-
# Query with paging
253-
log_call(f"client.records.get('{table_name}', page_size=5)")
254-
print("Fetching records with page_size=5...")
255-
paging_iterator = backoff(lambda: client.records.get(table_name, orderby=["new_Quantity"], page_size=5))
256-
for page_num, page in enumerate(paging_iterator, start=1):
257-
record_ids = [r.get("new_walkthroughdemoid")[:8] + "..." for r in page]
258-
print(f" Page {page_num}: {len(page)} records - IDs: {record_ids}")
259-
260-
# ============================================================================
261-
# 6b. LARGE BATCH (AUTO-CHUNKING)
231+
# 6. LARGE BATCH (AUTO-CHUNKING)
262232
# The SDK automatically splits lists > 1,000 records into sequential chunks,
263233
# each dispatched as a separate CreateMultiple / UpdateMultiple / UpsertMultiple
264234
# request. No manual pre-splitting needed.
265235
# Note: chunked operations are NOT atomic — a failure mid-way leaves earlier
266236
# chunks applied.
267237
# ============================================================================
268238
print("\n" + "=" * 80)
269-
print("6b. Large Batch (Auto-Chunking)")
239+
print("6. Large Batch (Auto-Chunking)")
270240
print("=" * 80)
271241

272242
LARGE_BATCH_SIZE = 1200 # spans 2 chunks: first 1000 + remaining 200
@@ -289,10 +259,40 @@ def _run_walkthrough(client):
289259
print(f"[OK] Updated {len(large_batch_ids)} records across 2 auto-chunks")
290260

291261
# ============================================================================
292-
# 7. QUERYBUILDER - FLUENT QUERIES
262+
# 7. PAGING DEMO
263+
# ============================================================================
264+
print("\n" + "=" * 80)
265+
print("7. Paging Demo")
266+
print("=" * 80)
267+
268+
# Create 20 records for paging
269+
log_call(f"client.records.create('{table_name}', [20 records])")
270+
paging_records = [
271+
{
272+
"new_Title": f"Paging test item {i}",
273+
"new_Quantity": i,
274+
"new_Amount": i * 10.0,
275+
"new_Completed": False,
276+
"new_Priority": Priority.LOW,
277+
}
278+
for i in range(1, 21)
279+
]
280+
paging_ids = backoff(lambda: client.records.create(table_name, paging_records))
281+
print(f"[OK] Created {len(paging_ids)} records for paging demo")
282+
283+
# Query with paging
284+
log_call(f"client.records.get('{table_name}', page_size=5)")
285+
print("Fetching records with page_size=5...")
286+
paging_iterator = backoff(lambda: client.records.get(table_name, orderby=["new_Quantity"], page_size=5))
287+
for page_num, page in enumerate(paging_iterator, start=1):
288+
record_ids = [r.get("new_walkthroughdemoid")[:8] + "..." for r in page]
289+
print(f" Page {page_num}: {len(page)} records - IDs: {record_ids}")
290+
291+
# ============================================================================
292+
# 8. QUERYBUILDER - FLUENT QUERIES
293293
# ============================================================================
294294
print("\n" + "=" * 80)
295-
print("7. QueryBuilder - Fluent Queries")
295+
print("8. QueryBuilder - Fluent Queries")
296296
print("=" * 80)
297297

298298
# Basic fluent query: active records sorted by amount (flat iteration)
@@ -398,10 +398,10 @@ def _run_walkthrough(client):
398398
print(" (empty DataFrame)")
399399

400400
# ============================================================================
401-
# 8. EXPAND (NAVIGATION PROPERTIES)
401+
# 9. EXPAND (NAVIGATION PROPERTIES)
402402
# ============================================================================
403403
print("\n" + "=" * 80)
404-
print("8. Expand (Navigation Properties)")
404+
print("9. Expand (Navigation Properties)")
405405
print("=" * 80)
406406

407407
# Simple expand: fetch accounts with their primary contact in one request
@@ -439,10 +439,10 @@ def _run_walkthrough(client):
439439
print(f"[SKIP] Nested expand demo skipped: {e}")
440440

441441
# ============================================================================
442-
# 9. SQL QUERY
442+
# 10. SQL QUERY
443443
# ============================================================================
444444
print("\n" + "=" * 80)
445-
print("9. SQL Query")
445+
print("10. SQL Query")
446446
print("=" * 80)
447447

448448
log_call(f"client.query.sql('SELECT new_title, new_quantity FROM {table_name} WHERE new_completed = 1')")
@@ -456,10 +456,10 @@ def _run_walkthrough(client):
456456
print(f"[WARN] SQL query failed (known server-side bug): {str(e)}")
457457

458458
# ============================================================================
459-
# 10. PICKLIST LABEL CONVERSION
459+
# 11. PICKLIST LABEL CONVERSION
460460
# ============================================================================
461461
print("\n" + "=" * 80)
462-
print("10. Picklist Label Conversion")
462+
print("11. Picklist Label Conversion")
463463
print("=" * 80)
464464

465465
log_call(f"client.records.create('{table_name}', {{'new_Priority': 'High'}})")
@@ -487,10 +487,10 @@ def _run_walkthrough(client):
487487
)
488488

489489
# ============================================================================
490-
# 11. COLUMN MANAGEMENT
490+
# 12. COLUMN MANAGEMENT
491491
# ============================================================================
492492
print("\n" + "=" * 80)
493-
print("11. Column Management")
493+
print("12. Column Management")
494494
print("=" * 80)
495495

496496
log_call(f"client.tables.add_columns('{table_name}', {{'new_Notes': 'string'}})")
@@ -503,10 +503,10 @@ def _run_walkthrough(client):
503503
print(f"[OK] Deleted column: new_Notes")
504504

505505
# ============================================================================
506-
# 12. DELETE OPERATIONS
506+
# 13. DELETE OPERATIONS
507507
# ============================================================================
508508
print("\n" + "=" * 80)
509-
print("12. Delete Operations")
509+
print("13. Delete Operations")
510510
print("=" * 80)
511511

512512
# Single delete
@@ -521,10 +521,10 @@ def _run_walkthrough(client):
521521
print(f" (Deleting {len(paging_ids)} paging demo records)")
522522

523523
# ============================================================================
524-
# 13. BATCH OPERATIONS
524+
# 14. BATCH OPERATIONS
525525
# ============================================================================
526526
print("\n" + "=" * 80)
527-
print("13. Batch Operations")
527+
print("14. Batch Operations")
528528
print("=" * 80)
529529

530530
# Batch create: send 2 creates in a single POST $batch
@@ -599,10 +599,10 @@ def _run_walkthrough(client):
599599
print(f"[OK] Batch delete: {len(result.succeeded)} records deleted in one HTTP request")
600600

601601
# ============================================================================
602-
# 14. CLEANUP
602+
# 15. CLEANUP
603603
# ============================================================================
604604
print("\n" + "=" * 80)
605-
print("14. Cleanup")
605+
print("15. Cleanup")
606606
print("=" * 80)
607607

608608
log_call(f"client.tables.delete('{table_name}')")
@@ -632,8 +632,8 @@ def _run_walkthrough(client):
632632
print(" [OK] Single and multiple record creation")
633633
print(" [OK] Reading records by ID and with filters")
634634
print(" [OK] Single and multiple record updates")
635-
print(" [OK] Paging through large result sets")
636635
print(" [OK] Large batch auto-chunking (1,200 records split into 2 chunks)")
636+
print(" [OK] Paging through large result sets")
637637
print(" [OK] QueryBuilder fluent queries (filter_eq, filter_in, filter_between, where, to_dataframe)")
638638
print(" [OK] Expand navigation properties (simple + nested ExpandOption)")
639639
print(" [OK] SQL queries")

tests/unit/data/test_multiple_chunking.py

Lines changed: 46 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -2,16 +2,7 @@
22
# Licensed under the MIT license.
33

44
"""Comprehensive tests for _create_multiple / _update_multiple / _upsert_multiple
5-
client-side chunking (issue #156).
6-
7-
Coverage goals
8-
--------------
9-
- Boundary conditions: 0, 1, BATCH-1, BATCH, BATCH+1, 2*BATCH, 2*BATCH+1 records
10-
- Chunk sizes: first chunk always full, last chunk carries the remainder
11-
- Payload correctness: each chunk sent to the right endpoint with the right records
12-
- ID aggregation: IDs from all chunks are collected in order
13-
- _update_by_ids: delegates correctly to _update_multiple (broadcast + paired)
14-
- Public API (records.create / records.update / records.upsert): delegates correctly
5+
client-side chunking.
156
"""
167

178
import unittest
@@ -31,7 +22,6 @@ def _make_odata_client() -> _ODataClient:
3122
mock_auth._acquire_token.return_value = MagicMock(access_token="token")
3223
client = _ODataClient(mock_auth, "https://org.crm.dynamics.com")
3324
client._request = MagicMock()
34-
# Skip picklist HTTP calls so _request counts reflect only batch POSTs
3525
client._convert_labels_to_ints = MagicMock(side_effect=lambda _t, r: r)
3626
return client
3727

@@ -45,7 +35,7 @@ def _mock_create_response(ids):
4535

4636

4737
def _mock_update_response():
48-
"""Mock HTTP response for UpdateMultiple (no meaningful body)."""
38+
"""Mock HTTP response for UpdateMultiple."""
4939
resp = MagicMock()
5040
resp.text = ""
5141
return resp
@@ -80,43 +70,43 @@ def test_zero_records_no_request(self):
8070
self.assertEqual(result, [])
8171

8272
def test_one_record_single_request(self):
83-
"""Single record one request, one ID returned."""
73+
"""Single record produces one request and one ID returned."""
8474
result = self._run(1, [_mock_create_response(["id-0"])])
8575
self.od._execute_raw.assert_called_once()
8676
self.assertEqual(result, ["id-0"])
8777

8878
def test_batch_minus_one_single_request(self):
89-
"""B-1 records fit in one chunk."""
79+
"""_MULTIPLE_BATCH_SIZE-1 records fit in one chunk."""
9080
ids = [f"id-{i}" for i in range(_MULTIPLE_BATCH_SIZE - 1)]
9181
result = self._run(_MULTIPLE_BATCH_SIZE - 1, [_mock_create_response(ids)])
9282
self.od._execute_raw.assert_called_once()
9383
self.assertEqual(len(result), _MULTIPLE_BATCH_SIZE - 1)
9484

9585
def test_exact_batch_size_single_request(self):
96-
"""Exactly _MULTIPLE_BATCH_SIZE records one chunk, one request."""
86+
"""Exactly _MULTIPLE_BATCH_SIZE records produces one chunk and one request."""
9787
ids = [f"id-{i}" for i in range(_MULTIPLE_BATCH_SIZE)]
9888
result = self._run(_MULTIPLE_BATCH_SIZE, [_mock_create_response(ids)])
9989
self.od._execute_raw.assert_called_once()
10090
self.assertEqual(len(result), _MULTIPLE_BATCH_SIZE)
10191

10292
def test_batch_plus_one_two_requests(self):
103-
"""B+1 records two chunks, two requests."""
93+
"""_MULTIPLE_BATCH_SIZE+1 records produces two chunks and two requests."""
10494
ids1 = [f"id-{i}" for i in range(_MULTIPLE_BATCH_SIZE)]
10595
ids2 = ["id-last"]
10696
result = self._run(_MULTIPLE_BATCH_SIZE + 1, [_mock_create_response(ids1), _mock_create_response(ids2)])
10797
self.assertEqual(self.od._execute_raw.call_count, 2)
10898
self.assertEqual(len(result), _MULTIPLE_BATCH_SIZE + 1)
10999

110100
def test_two_full_batches(self):
111-
"""2*_MULTIPLE_BATCH_SIZE records two full chunks."""
101+
"""2*_MULTIPLE_BATCH_SIZE records produces two full chunks."""
112102
ids1 = [f"id-{i}" for i in range(_MULTIPLE_BATCH_SIZE)]
113103
ids2 = [f"id-{i}" for i in range(_MULTIPLE_BATCH_SIZE, 2 * _MULTIPLE_BATCH_SIZE)]
114104
result = self._run(2 * _MULTIPLE_BATCH_SIZE, [_mock_create_response(ids1), _mock_create_response(ids2)])
115105
self.assertEqual(self.od._execute_raw.call_count, 2)
116106
self.assertEqual(len(result), 2 * _MULTIPLE_BATCH_SIZE)
117107

118108
def test_two_batches_plus_one(self):
119-
"""2*_MULTIPLE_BATCH_SIZE+1 records three chunks."""
109+
"""2*_MULTIPLE_BATCH_SIZE+1 records produces three chunks."""
120110
se = [_mock_create_response([f"id-{j}" for j in range(_MULTIPLE_BATCH_SIZE)]) for _ in range(2)]
121111
se.append(_mock_create_response(["id-extra"]))
122112
result = self._run(2 * _MULTIPLE_BATCH_SIZE + 1, se)
@@ -131,11 +121,6 @@ def setUp(self):
131121
self.od = _make_odata_client()
132122
self.od._execute_raw = MagicMock(return_value=_mock_create_response([]))
133123

134-
def _captured_targets(self, call_index):
135-
"""Return the Targets list from the _build_create_multiple payload for a given call."""
136-
# _execute_raw is called with the result of _build_create_multiple, which
137-
# we can't easily inspect without going deeper. Instead, patch _build_create_multiple.
138-
return None # handled in test below
139124

140125
def test_first_chunk_has_batch_size_records(self):
141126
"""The first chunk sent to the server has exactly _MULTIPLE_BATCH_SIZE records."""
@@ -510,6 +495,36 @@ def test_paired_delegates_correctly(self):
510495
[{"accountid": "id-1", "name": "A"}, {"accountid": "id-2", "name": "B"}],
511496
)
512497

498+
def test_empty_ids_returns_none_without_delegating(self):
499+
"""Empty ids list returns immediately without calling _update_multiple."""
500+
result = self.od._update_by_ids("account", [], {"name": "X"})
501+
self.assertIsNone(result)
502+
self.od._update_multiple.assert_not_called()
503+
504+
def test_non_list_ids_raises_type_error(self):
505+
"""Non-list ids raises TypeError before any delegation."""
506+
with self.assertRaises(TypeError):
507+
self.od._update_by_ids("account", "id-1", {"name": "X"}) # type: ignore
508+
self.od._update_multiple.assert_not_called()
509+
510+
def test_changes_non_dict_non_list_raises_type_error(self):
511+
"""changes that is neither dict nor list raises TypeError."""
512+
with self.assertRaises(TypeError):
513+
self.od._update_by_ids("account", ["id-1"], "invalid") # type: ignore
514+
self.od._update_multiple.assert_not_called()
515+
516+
def test_changes_list_length_mismatch_raises_value_error(self):
517+
"""Paired changes list with different length from ids raises ValueError."""
518+
with self.assertRaises(ValueError):
519+
self.od._update_by_ids("account", ["id-1", "id-2"], [{"name": "A"}])
520+
self.od._update_multiple.assert_not_called()
521+
522+
def test_changes_list_non_dict_element_raises_type_error(self):
523+
"""Non-dict element in paired changes list raises TypeError."""
524+
with self.assertRaises(TypeError):
525+
self.od._update_by_ids("account", ["id-1", "id-2"], [{"name": "A"}, "bad"]) # type: ignore
526+
self.od._update_multiple.assert_not_called()
527+
513528

514529
# ---------------------------------------------------------------------------
515530
# Public API: records.create / records.update / records.upsert
@@ -567,6 +582,14 @@ def test_list_delegates_to_update_by_ids(self):
567582
"account", ["id-1", "id-2"], {"name": "X"}
568583
)
569584

585+
def test_list_paired_delegates_to_update_by_ids(self):
586+
"""Paired list-of-patches passes through to _update_by_ids unchanged."""
587+
ops, mock_odata = _make_records_client()
588+
ops.update("account", ["id-1", "id-2"], [{"name": "A"}, {"name": "B"}])
589+
mock_odata._update_by_ids.assert_called_once_with(
590+
"account", ["id-1", "id-2"], [{"name": "A"}, {"name": "B"}]
591+
)
592+
570593
def test_single_delegates_to_update(self):
571594
"""Single-record update calls _update, not _update_by_ids."""
572595
ops, mock_odata = _make_records_client()

0 commit comments

Comments (0)