Skip to content

Commit 11eab9f

Browse files
zhaodongwang-msft and Max Wang
authored
retries for examples (#72)
* retries for examples * update per ai comments * improvments to retry --------- Co-authored-by: Max Wang <zhaodongwang@microsoft.com>
1 parent 43084b2 commit 11eab9f

File tree

4 files changed

+298
-81
lines changed

4 files changed

+298
-81
lines changed

examples/advanced/file_upload.py

Lines changed: 55 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ def log(call: str):
6666

6767
# Simple SHA-256 helper with caching to avoid re-reading large files multiple times.
6868
_FILE_HASH_CACHE = {}
69-
69+
ATTRIBUTE_VISIBILITY_DELAYS = (0, 3, 10, 20, 35, 50, 70, 90, 120)
7070

7171
def file_sha256(path: Path): # returns (hex_digest, size_bytes)
7272
try:
@@ -153,22 +153,32 @@ def generate_test_pdf(size_mb: int = 10) -> Path:
153153
return test_file
154154

155155

156-
def backoff(op, *, delays=(0, 2, 5, 10), retry_status=(400, 403, 404, 409, 412, 429, 500, 502, 503, 504)):
156+
def backoff(op, *, delays=(0, 2, 5, 10, 20, 20)):
157157
last = None
158+
total_delay = 0
159+
attempts = 0
158160
for d in delays:
159161
if d:
160162
time.sleep(d)
163+
total_delay += d
164+
attempts += 1
161165
try:
162-
return op()
166+
result = op()
167+
if attempts > 1:
168+
retry_count = attempts - 1
169+
print(
170+
f" ↺ Backoff succeeded after {retry_count} retry(s); waited {total_delay}s total."
171+
)
172+
return result
163173
except Exception as ex: # noqa: BLE001
164174
last = ex
165-
r = getattr(ex, "response", None)
166-
code = getattr(r, "status_code", None)
167-
if isinstance(ex, requests.exceptions.HTTPError) and code in retry_status:
168-
continue
169-
# For non-HTTP errors just retry the schedule
170175
continue
171176
if last:
177+
if attempts:
178+
retry_count = max(attempts - 1, 0)
179+
print(
180+
f" ⚠ Backoff exhausted after {retry_count} retry(s); waited {total_delay}s total."
181+
)
172182
raise last
173183

174184

@@ -178,12 +188,12 @@ def backoff(op, *, delays=(0, 2, 5, 10), retry_status=(400, 403, 404, 409, 412,
178188

179189
def ensure_table():
180190
# Check by schema
181-
existing = client.get_table_info(TABLE_SCHEMA_NAME)
191+
existing = backoff(lambda: client.get_table_info(TABLE_SCHEMA_NAME))
182192
if existing:
183193
print({"table": TABLE_SCHEMA_NAME, "existed": True})
184194
return existing
185195
log("client.create_table('new_FileSample', schema={'new_Title': 'string'})")
186-
info = client.create_table(TABLE_SCHEMA_NAME, {"new_Title": "string"})
196+
info = backoff(lambda: client.create_table(TABLE_SCHEMA_NAME, {"new_Title": "string"}))
187197
print({"table": TABLE_SCHEMA_NAME, "existed": False, "metadata_id": info.get("metadata_id")})
188198
return info
189199

@@ -217,7 +227,7 @@ def ensure_file_attribute_generic(schema_name: str, label: str, key_prefix: str)
217227
f"{odata.api}/EntityDefinitions({meta_id})/Attributes?$select=SchemaName&$filter="
218228
f"SchemaName eq '{schema_name}'"
219229
)
220-
r = odata._request("get", url)
230+
r = backoff(lambda: odata._request("get", url), delays=ATTRIBUTE_VISIBILITY_DELAYS)
221231
val = []
222232
try:
223233
val = r.json().get("value", [])
@@ -245,7 +255,7 @@ def ensure_file_attribute_generic(schema_name: str, label: str, key_prefix: str)
245255
}
246256
try:
247257
url = f"{odata.api}/EntityDefinitions({meta_id})/Attributes"
248-
r = odata._request("post", url, json=payload)
258+
backoff(lambda: odata._request("post", url, json=payload), delays=ATTRIBUTE_VISIBILITY_DELAYS)
249259
print({f"{key_prefix}_file_attribute_created": True})
250260
time.sleep(2)
251261
return True
@@ -263,11 +273,39 @@ def ensure_file_attribute_generic(schema_name: str, label: str, key_prefix: str)
263273
return False
264274

265275

276+
def wait_for_attribute_visibility(logical_name: str, label: str):
277+
if not logical_name or not entity_set:
278+
return False
279+
odata = client._get_odata()
280+
probe_url = f"{odata.api}/{entity_set}?$top=1&$select={logical_name}"
281+
waited = 0
282+
last_error = None
283+
for delay in ATTRIBUTE_VISIBILITY_DELAYS:
284+
if delay:
285+
time.sleep(delay)
286+
waited += delay
287+
try:
288+
resp = odata._request("get", probe_url)
289+
try:
290+
resp.json()
291+
except Exception: # noqa: BLE001
292+
pass
293+
if waited:
294+
print({f"{label}_attribute_visible_wait_seconds": waited})
295+
return True
296+
except Exception as ex: # noqa: BLE001
297+
last_error = ex
298+
continue
299+
raise RuntimeError(f"Timed out waiting for attribute '{logical_name}' to materialize") from last_error
300+
301+
266302
# Conditionally ensure each attribute only if its mode is selected
267303
if run_small:
268304
ensure_file_attribute_generic(small_file_attr_schema, "Small Document", "small")
305+
wait_for_attribute_visibility(small_file_attr_logical, "small")
269306
if run_chunk:
270307
ensure_file_attribute_generic(chunk_file_attr_schema, "Chunk Document", "chunk")
308+
wait_for_attribute_visibility(chunk_file_attr_logical, "chunk")
271309

272310
# --------------------------- Record create ---------------------------
273311
record_id = None
@@ -325,7 +363,7 @@ def get_dataset_info(file_path: Path):
325363
dl_url_single = (
326364
f"{odata.api}/{entity_set}({record_id})/{small_file_attr_logical}/$value" # raw entity_set URL OK
327365
)
328-
resp_single = odata._request("get", dl_url_single)
366+
resp_single = backoff(lambda: odata._request("get", dl_url_single))
329367
content_single = resp_single.content or b""
330368
import hashlib # noqa: WPS433
331369

@@ -355,7 +393,7 @@ def get_dataset_info(file_path: Path):
355393
)
356394
)
357395
print({"small_replace_upload_completed": True, "small_replace_source_size": replace_size_small})
358-
resp_single_replace = odata._request("get", dl_url_single)
396+
resp_single_replace = backoff(lambda: odata._request("get", dl_url_single))
359397
content_single_replace = resp_single_replace.content or b""
360398
downloaded_hash_replace = hashlib.sha256(content_single_replace).hexdigest() if content_single_replace else None
361399
hash_match_replace = (
@@ -397,7 +435,7 @@ def get_dataset_info(file_path: Path):
397435
dl_url_chunk = (
398436
f"{odata.api}/{entity_set}({record_id})/{chunk_file_attr_logical}/$value" # raw entity_set for download
399437
)
400-
resp_chunk = odata._request("get", dl_url_chunk)
438+
resp_chunk = backoff(lambda: odata._request("get", dl_url_chunk))
401439
content_chunk = resp_chunk.content or b""
402440
import hashlib # noqa: WPS433
403441

@@ -426,7 +464,7 @@ def get_dataset_info(file_path: Path):
426464
)
427465
)
428466
print({"chunk_replace_upload_completed": True})
429-
resp_chunk_replace = odata._request("get", dl_url_chunk)
467+
resp_chunk_replace = backoff(lambda: odata._request("get", dl_url_chunk))
430468
content_chunk_replace = resp_chunk_replace.content or b""
431469
dst_hash_chunk_replace = hashlib.sha256(content_chunk_replace).hexdigest() if content_chunk_replace else None
432470
hash_match_chunk_replace = (
@@ -459,7 +497,7 @@ def get_dataset_info(file_path: Path):
459497
if cleanup_table:
460498
try:
461499
log(f"client.delete_table('{TABLE_SCHEMA_NAME}')")
462-
client.delete_table(TABLE_SCHEMA_NAME)
500+
backoff(lambda: client.delete_table(TABLE_SCHEMA_NAME))
463501
print({"table_deleted": True})
464502
except Exception as e: # noqa: BLE001
465503
print({"table_deleted": False, "error": str(e)})

examples/advanced/walkthrough.py

Lines changed: 61 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,12 @@
1919

2020
import sys
2121
import json
22+
import time
2223
from enum import IntEnum
2324
from azure.identity import InteractiveBrowserCredential
2425
from PowerPlatform.Dataverse.client import DataverseClient
26+
from PowerPlatform.Dataverse.core.errors import MetadataError
27+
import requests
2528

2629

2730
# Simple logging helper
@@ -36,6 +39,35 @@ class Priority(IntEnum):
3639
HIGH = 3
3740

3841

42+
def backoff(op, *, delays=(0, 2, 5, 10, 20, 20)):
43+
last = None
44+
total_delay = 0
45+
attempts = 0
46+
for d in delays:
47+
if d:
48+
time.sleep(d)
49+
total_delay += d
50+
attempts += 1
51+
try:
52+
result = op()
53+
if attempts > 1:
54+
retry_count = attempts - 1
55+
print(
56+
f" ↺ Backoff succeeded after {retry_count} retry(s); waited {total_delay}s total."
57+
)
58+
return result
59+
except Exception as ex: # noqa: BLE001
60+
last = ex
61+
continue
62+
if last:
63+
if attempts:
64+
retry_count = max(attempts - 1, 0)
65+
print(
66+
f" ⚠ Backoff exhausted after {retry_count} retry(s); waited {total_delay}s total."
67+
)
68+
raise last
69+
70+
3971
def main():
4072
print("=" * 80)
4173
print("Dataverse SDK Walkthrough")
@@ -72,7 +104,7 @@ def main():
72104
table_name = "new_WalkthroughDemo"
73105

74106
log_call(f"client.get_table_info('{table_name}')")
75-
table_info = client.get_table_info(table_name)
107+
table_info = backoff(lambda: client.get_table_info(table_name))
76108

77109
if table_info:
78110
print(f"✓ Table already exists: {table_info.get('table_schema_name')}")
@@ -87,7 +119,7 @@ def main():
87119
"new_Completed": "bool",
88120
"new_Priority": Priority,
89121
}
90-
table_info = client.create_table(table_name, columns)
122+
table_info = backoff(lambda: client.create_table(table_name, columns))
91123
print(f"✓ Created table: {table_info.get('table_schema_name')}")
92124
print(f" Columns created: {', '.join(table_info.get('columns_created', []))}")
93125

@@ -107,7 +139,7 @@ def main():
107139
"new_Completed": False,
108140
"new_Priority": Priority.MEDIUM,
109141
}
110-
id1 = client.create(table_name, single_record)[0]
142+
id1 = backoff(lambda: client.create(table_name, single_record))[0]
111143
print(f"✓ Created single record: {id1}")
112144

113145
# Multiple create
@@ -135,7 +167,7 @@ def main():
135167
"new_Priority": Priority.HIGH,
136168
},
137169
]
138-
ids = client.create(table_name, multiple_records)
170+
ids = backoff(lambda: client.create(table_name, multiple_records))
139171
print(f"✓ Created {len(ids)} records: {ids}")
140172

141173
# ============================================================================
@@ -147,7 +179,7 @@ def main():
147179

148180
# Single read by ID
149181
log_call(f"client.get('{table_name}', '{id1}')")
150-
record = client.get(table_name, id1)
182+
record = backoff(lambda: client.get(table_name, id1))
151183
print("✓ Retrieved single record:")
152184
print(
153185
json.dumps(
@@ -167,7 +199,8 @@ def main():
167199
# Multiple read with filter
168200
log_call(f"client.get('{table_name}', filter='new_quantity gt 5')")
169201
all_records = []
170-
for page in client.get(table_name, filter="new_quantity gt 5"):
202+
records_iterator = backoff(lambda: client.get(table_name, filter="new_quantity gt 5"))
203+
for page in records_iterator:
171204
all_records.extend(page)
172205
print(f"✓ Found {len(all_records)} records with new_quantity > 5")
173206
for rec in all_records:
@@ -182,13 +215,13 @@ def main():
182215

183216
# Single update
184217
log_call(f"client.update('{table_name}', '{id1}', {{...}})")
185-
client.update(table_name, id1, {"new_Quantity": 100})
186-
updated = client.get(table_name, id1)
218+
backoff(lambda: client.update(table_name, id1, {"new_Quantity": 100}))
219+
updated = backoff(lambda: client.get(table_name, id1))
187220
print(f"✓ Updated single record new_Quantity: {updated.get('new_quantity')}")
188221

189222
# Multiple update (broadcast same change)
190223
log_call(f"client.update('{table_name}', [{len(ids)} IDs], {{...}})")
191-
client.update(table_name, ids, {"new_Completed": True})
224+
backoff(lambda: client.update(table_name, ids, {"new_Completed": True}))
192225
print(f"✓ Updated {len(ids)} records to new_Completed=True")
193226

194227
# ============================================================================
@@ -210,13 +243,14 @@ def main():
210243
}
211244
for i in range(1, 21)
212245
]
213-
paging_ids = client.create(table_name, paging_records)
246+
paging_ids = backoff(lambda: client.create(table_name, paging_records))
214247
print(f"✓ Created {len(paging_ids)} records for paging demo")
215248

216249
# Query with paging
217250
log_call(f"client.get('{table_name}', page_size=5)")
218251
print("Fetching records with page_size=5...")
219-
for page_num, page in enumerate(client.get(table_name, orderby=["new_Quantity"], page_size=5), start=1):
252+
paging_iterator = backoff(lambda: client.get(table_name, orderby=["new_Quantity"], page_size=5))
253+
for page_num, page in enumerate(paging_iterator, start=1):
220254
record_ids = [r.get("new_walkthroughdemoid")[:8] + "..." for r in page]
221255
print(f" Page {page_num}: {len(page)} records - IDs: {record_ids}")
222256

@@ -230,7 +264,7 @@ def main():
230264
log_call(f"client.query_sql('SELECT new_title, new_quantity FROM {table_name} WHERE new_completed = 1')")
231265
sql = f"SELECT new_title, new_quantity FROM new_walkthroughdemo WHERE new_completed = 1"
232266
try:
233-
results = client.query_sql(sql)
267+
results = backoff(lambda: client.query_sql(sql))
234268
print(f"✓ SQL query returned {len(results)} completed records:")
235269
for result in results[:5]: # Show first 5
236270
print(f" - new_Title='{result.get('new_title')}', new_Quantity={result.get('new_quantity')}")
@@ -252,8 +286,8 @@ def main():
252286
"new_Completed": False,
253287
"new_Priority": "High", # String label instead of int
254288
}
255-
label_id = client.create(table_name, label_record)[0]
256-
retrieved = client.get(table_name, label_id)
289+
label_id = backoff(lambda: client.create(table_name, label_record))[0]
290+
retrieved = backoff(lambda: client.get(table_name, label_id))
257291
print(f"✓ Created record with string label 'High' for new_Priority")
258292
print(f" new_Priority stored as integer: {retrieved.get('new_priority')}")
259293
print(f" new_Priority@FormattedValue: {retrieved.get('new_priority@OData.Community.Display.V1.FormattedValue')}")
@@ -266,12 +300,12 @@ def main():
266300
print("=" * 80)
267301

268302
log_call(f"client.create_columns('{table_name}', {{'new_Notes': 'string'}})")
269-
created_cols = client.create_columns(table_name, {"new_Notes": "string"})
303+
created_cols = backoff(lambda: client.create_columns(table_name, {"new_Notes": "string"}))
270304
print(f"✓ Added column: {created_cols[0]}")
271305

272306
# Delete the column we just added
273307
log_call(f"client.delete_columns('{table_name}', ['new_Notes'])")
274-
client.delete_columns(table_name, ["new_Notes"])
308+
backoff(lambda: client.delete_columns(table_name, ["new_Notes"]))
275309
print(f"✓ Deleted column: new_Notes")
276310

277311
# ============================================================================
@@ -283,12 +317,12 @@ def main():
283317

284318
# Single delete
285319
log_call(f"client.delete('{table_name}', '{id1}')")
286-
client.delete(table_name, id1)
320+
backoff(lambda: client.delete(table_name, id1))
287321
print(f"✓ Deleted single record: {id1}")
288322

289323
# Multiple delete (delete the paging demo records)
290324
log_call(f"client.delete('{table_name}', [{len(paging_ids)} IDs])")
291-
job_id = client.delete(table_name, paging_ids)
325+
job_id = backoff(lambda: client.delete(table_name, paging_ids))
292326
print(f"✓ Bulk delete job started: {job_id}")
293327
print(f" (Deleting {len(paging_ids)} paging demo records)")
294328

@@ -300,8 +334,15 @@ def main():
300334
print("=" * 80)
301335

302336
log_call(f"client.delete_table('{table_name}')")
303-
client.delete_table(table_name)
304-
print(f"✓ Deleted table: {table_name}")
337+
try:
338+
backoff(lambda: client.delete_table(table_name))
339+
print(f"✓ Deleted table: {table_name}")
340+
except Exception as ex: # noqa: BLE001
341+
code = getattr(getattr(ex, "response", None), "status_code", None)
342+
if (isinstance(ex, (requests.exceptions.HTTPError, MetadataError)) and code == 404):
343+
print(f"✓ Table removed: {table_name}")
344+
else:
345+
raise
305346

306347
# ============================================================================
307348
# SUMMARY

0 commit comments

Comments (0)