
Commit aa6e58a

1 parent 917b8d4 commit aa6e58a

1 file changed: 0 additions & 121 deletions

File tree

sentry_sdk/integrations/anthropic.py

@@ -75,127 +75,6 @@ def setup_once() -> None:
         Messages.create = _wrap_message_create(Messages.create)
         AsyncMessages.create = _wrap_message_create_async(AsyncMessages.create)
 
-        Stream.__iter__ = _wrap_stream_iter(Stream.__iter__)
-        AsyncStream.__aiter__ = _wrap_async_stream_aiter(AsyncStream.__aiter__)
-
-
-def _wrap_stream_iter(
-    f: "Callable[..., Iterator[RawMessageStreamEvent]]",
-) -> "Callable[..., Iterator[RawMessageStreamEvent]]":
-    """
-    Sets information received while iterating the response stream on the AI Client Span.
-    Responsible for closing the AI Client Span.
-    """
-
-    @wraps(f)
-    def _patched_iter(self: "Stream") -> "Iterator[RawMessageStreamEvent]":
-        if not hasattr(self, "_sentry_span"):
-            for event in f(self):
-                yield event
-            return
-
-        model = None
-        usage = _RecordedUsage()
-        content_blocks: "list[str]" = []
-
-        for event in f(self):
-            (
-                model,
-                usage,
-                content_blocks,
-            ) = _collect_ai_data(
-                event,
-                model,
-                usage,
-                content_blocks,
-            )
-            yield event
-
-        # Anthropic's input_tokens excludes cached/cache_write tokens.
-        # Normalize to total input tokens for correct cost calculations.
-        total_input = (
-            usage.input_tokens
-            + (usage.cache_read_input_tokens or 0)
-            + (usage.cache_write_input_tokens or 0)
-        )
-
-        span = self._sentry_span
-        integration = self._integration
-
-        _set_output_data(
-            span=span,
-            integration=integration,
-            model=model,
-            input_tokens=total_input,
-            output_tokens=usage.output_tokens,
-            cache_read_input_tokens=usage.cache_read_input_tokens,
-            cache_write_input_tokens=usage.cache_write_input_tokens,
-            content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
-            finish_span=True,
-        )
-
-    return _patched_iter
-
-
-def _wrap_async_stream_aiter(
-    f: "Callable[..., AsyncIterator[RawMessageStreamEvent]]",
-) -> "Callable[..., AsyncIterator[RawMessageStreamEvent]]":
-    """
-    Sets information received while iterating the response stream on the AI Client Span.
-    Responsible for closing the AI Client Span.
-    """
-
-    @wraps(f)
-    async def _patched_aiter(
-        self: "AsyncStream",
-    ) -> "AsyncIterator[RawMessageStreamEvent]":
-        if not hasattr(self, "_sentry_span"):
-            async for event in f(self):
-                yield event
-            return
-
-        model = None
-        usage = _RecordedUsage()
-        content_blocks: "list[str]" = []
-
-        async for event in f(self):
-            (
-                model,
-                usage,
-                content_blocks,
-            ) = _collect_ai_data(
-                event,
-                model,
-                usage,
-                content_blocks,
-            )
-            yield event
-
-        # Anthropic's input_tokens excludes cached/cache_write tokens.
-        # Normalize to total input tokens for correct cost calculations.
-        total_input = (
-            usage.input_tokens
-            + (usage.cache_read_input_tokens or 0)
-            + (usage.cache_write_input_tokens or 0)
-        )
-
-        span = self._sentry_span
-        integration = self._integration
-
-        _set_output_data(
-            span=span,
-            integration=integration,
-            model=model,
-            input_tokens=total_input,
-            output_tokens=usage.output_tokens,
-            cache_read_input_tokens=usage.cache_read_input_tokens,
-            cache_write_input_tokens=usage.cache_write_input_tokens,
-            content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
-            finish_span=True,
-        )
-
-    return _patched_aiter
-
 
 def _capture_exception(exc: "Any") -> None:
     set_span_errored()
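
Both removed wrappers followed one pattern: re-yield each stream event while accumulating usage, then normalize Anthropic's input_tokens (which excludes cache reads and writes) into a total before closing the span. Below is a minimal, runnable sketch of that pattern; FakeStream, Usage, wrap_iter, and total_input_tokens are hypothetical stand-ins, not the SDK's real Stream, _RecordedUsage, or helper names.

    from functools import wraps
    from typing import Callable, Iterator, Optional


    class Usage:
        # Hypothetical stand-in for the SDK's _RecordedUsage.
        def __init__(self) -> None:
            self.input_tokens: int = 0
            self.output_tokens: int = 0
            self.cache_read_input_tokens: Optional[int] = None
            self.cache_write_input_tokens: Optional[int] = None


    def total_input_tokens(usage: Usage) -> int:
        # Same normalization as the removed code: Anthropic's input_tokens
        # excludes cached/cache_write tokens, so sum all three buckets.
        return (
            usage.input_tokens
            + (usage.cache_read_input_tokens or 0)
            + (usage.cache_write_input_tokens or 0)
        )


    def wrap_iter(f: Callable[..., Iterator[str]]) -> Callable[..., Iterator[str]]:
        # Generator wrapper in the style of the removed _wrap_stream_iter:
        # re-yield every event, then report once the stream is exhausted.
        @wraps(f)
        def patched(self: "FakeStream") -> Iterator[str]:
            for event in f(self):
                self.usage.output_tokens += 1  # toy per-event accounting
                yield event
            print("normalized input tokens:", total_input_tokens(self.usage))

        return patched


    class FakeStream:
        def __init__(self) -> None:
            self.usage = Usage()
            self.usage.input_tokens = 100            # fresh input tokens
            self.usage.cache_read_input_tokens = 800
            self.usage.cache_write_input_tokens = 50

        def __iter__(self) -> Iterator[str]:
            yield from ("chunk-1", "chunk-2")


    # Patch the class, just as setup_once() patched Stream.__iter__.
    FakeStream.__iter__ = wrap_iter(FakeStream.__iter__)

    for chunk in FakeStream():
        pass  # prints "normalized input tokens: 950" (100 + 800 + 50)

Because the accounting and the final report live inside the generator, they run only when the consumer actually exhausts the stream, which is why the removed wrappers were documented as "responsible for closing the AI Client Span."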
