Skip to content

Commit fb467d8

Browse files
Copilot and rchiodo authored
Fix: ContinueRequest with specific threadId resumes all threads (in-process adapter fix) (#2012)
* Initial plan

* Fix multi-thread resume when in-process debug adapter is used

  Per the DAP spec, a ContinueRequest should resume all threads unless singleThread=True is explicitly set. Previously, only the out-of-process adapter path worked correctly (it transformed threadId to '*' before forwarding to pydevd). With the in-process adapter, the specific threadId reached pydevd directly but was only used to resume that one thread.

  Fix on_continue_request to set thread_id='*' whenever singleThread is not True, regardless of multi_threads_single_notification. Also update the write_continue test helper and add a regression test.

  Fixes: #2009

  Co-authored-by: rchiodo <19672699+rchiodo@users.noreply.github.com>

* Use getattr for safe singleThread attribute access in on_continue_request

  Most DAP clients omit the optional singleThread field entirely. Using getattr(arguments, 'singleThread', False) is more defensive than direct attribute access, guarding against any non-standard arguments objects.

  Co-authored-by: rchiodo <19672699+rchiodo@users.noreply.github.com>

* Add timeout failure comment in test; keep intentional "TEST SUCEEDED" convention

  The 'TEST SUCEEDED' misspelling is an intentional convention in the pydevd test framework (debugger_unittest.py checks stdout for this exact string). Revert the resource file to preserve the convention while still adding the explanatory comment requested in the test method.

  Co-authored-by: rchiodo <19672699+rchiodo@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: rchiodo <19672699+rchiodo@users.noreply.github.com>
1 parent b7f2433 commit fb467d8

3 files changed

Lines changed: 79 additions & 7 deletions

File tree

src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -559,7 +559,15 @@ def on_continue_request(self, py_db, request):
559559
"""
560560
arguments = request.arguments # : :type arguments: ContinueArguments
561561
thread_id = arguments.threadId
562-
if py_db.multi_threads_single_notification:
562+
563+
# Per the DAP spec, the continue request resumes execution of all threads
564+
# unless singleThread is explicitly true (and the capability
565+
# supportsSingleThreadExecutionRequests is advertised). Only use the
566+
# specific threadId when singleThread is set; otherwise resume all.
567+
# Use getattr with a default of False since most DAP clients omit this
568+
# optional field entirely.
569+
single_thread = getattr(arguments, "singleThread", False)
570+
if not single_thread or py_db.multi_threads_single_notification:
563571
thread_id = "*"
564572

565573
def on_resumed():
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
"""
2+
Test for verifying that continuing from a breakpoint resumes all threads,
3+
not just the thread that hit the breakpoint.
4+
5+
When a specific threadId is sent in the ContinueRequest without singleThread=True,
6+
all threads should be resumed per the DAP spec.
7+
"""
8+
import threading
9+
10+
stop_event = threading.Event()
11+
12+
13+
def thread_func():
14+
stop_event.wait() # Thread 2 line - wait until signaled
15+
print("Thread finished")
16+
17+
18+
if __name__ == "__main__":
19+
t = threading.Thread(target=thread_func)
20+
t.start()
21+
22+
stop_event.set() # Break here - breakpoint on this line
23+
24+
t.join()
25+
print("TEST SUCEEDED!") # end

src/debugpy/_vendored/pydevd/tests_python/test_debugger_json.py

Lines changed: 45 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -390,12 +390,16 @@ def _by_type(self, *msgs):
390390
ret[msg.__class__] = msg
391391
return ret
392392

393-
def write_continue(self, wait_for_response=True, thread_id="*"):
394-
continue_request = self.write_request(pydevd_schema.ContinueRequest(pydevd_schema.ContinueArguments(threadId=thread_id)))
393+
def write_continue(self, wait_for_response=True, thread_id="*", single_thread=False):
394+
arguments = pydevd_schema.ContinueArguments(threadId=thread_id)
395+
if single_thread:
396+
arguments.singleThread = True
397+
continue_request = self.write_request(pydevd_schema.ContinueRequest(arguments))
395398

396399
if wait_for_response:
397-
if thread_id != "*":
398-
# event, response may be sent in any order
400+
if single_thread:
401+
# When singleThread=True, only the specified thread resumes.
402+
# ContinuedEvent and ContinueResponse may arrive in any order.
399403
msg1 = self.wait_for_json_message((ContinuedEvent, ContinueResponse))
400404
msg2 = self.wait_for_json_message((ContinuedEvent, ContinueResponse))
401405
by_type = self._by_type(msg1, msg2)
@@ -406,8 +410,10 @@ def write_continue(self, wait_for_response=True, thread_id="*"):
406410
assert continued_ev.body.allThreadsContinued == False
407411
assert continue_response.body.allThreadsContinued == False
408412
else:
409-
# The continued event is received before the response.
410-
self.wait_for_continued_event(all_threads_continued=True)
413+
# Default: all threads resume regardless of the threadId sent.
414+
# Per the DAP spec, singleThread must be explicitly True to
415+
# resume only one thread. Wait for the continue response with
416+
# allThreadsContinued=True.
411417
continue_response = self.wait_for_response(continue_request)
412418
assert continue_response.body.allThreadsContinued
413419

@@ -800,6 +806,9 @@ def test_case_json_suspend_notification(case_setup_dap):
800806
json_facade.write_make_initial_run()
801807

802808
json_hit = json_facade.wait_for_thread_stopped(line=break1_line)
809+
# Per the DAP spec, a ContinueRequest without singleThread=True must resume
810+
# all threads even when a specific threadId is provided. Verify the response
811+
# has allThreadsContinued=True (the correct behavior).
803812
json_facade.write_continue(thread_id=json_hit.thread_id)
804813

805814
json_hit = json_facade.wait_for_thread_stopped(line=break1_line)
@@ -808,6 +817,36 @@ def test_case_json_suspend_notification(case_setup_dap):
808817
writer.finished_ok = True
809818

810819

820+
def test_case_json_continue_all_threads(case_setup_dap):
821+
"""Regression test: ContinueRequest with a specific threadId (no singleThread=True)
822+
must resume ALL threads, not just the requested one. This tests the fix for the
823+
in-process debug adapter scenario where the adapter does not transform threadId to '*'.
824+
"""
825+
with case_setup_dap.test_file("_debugger_case_multi_threads_continue.py") as writer:
826+
json_facade = JsonFacade(writer)
827+
# Simulate the in-process adapter: disable single notification mode.
828+
# In this mode the server receives a specific threadId (not '*') directly
829+
# from the client without the adapter transforming it.
830+
json_facade.writer.write_multi_threads_single_notification(False)
831+
break_line = writer.get_line_index_with_content("Break here")
832+
json_facade.write_launch()
833+
json_facade.write_set_breakpoints(break_line)
834+
json_facade.write_make_initial_run()
835+
836+
# Wait for the breakpoint to be hit.
837+
json_hit = json_facade.wait_for_thread_stopped(line=break_line)
838+
839+
# Send ContinueRequest with the specific thread's id (no singleThread=True).
840+
# Per the DAP spec this must resume ALL threads, not just the specified one.
841+
# The response must have allThreadsContinued=True.
842+
# NOTE: If the fix regresses, the secondary thread stays blocked on
843+
# stop_event.wait() and the debuggee hangs on t.join(), causing a test
844+
# timeout rather than an explicit assertion failure.
845+
json_facade.write_continue(thread_id=json_hit.thread_id)
846+
847+
writer.finished_ok = True
848+
849+
811850
def test_case_handled_exception_no_break_on_generator(case_setup_dap):
812851
with case_setup_dap.test_file("_debugger_case_ignore_exceptions.py") as writer:
813852
json_facade = JsonFacade(writer)

0 commit comments

Comments
 (0)