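"""Test script: exercise Sentry's OpenAI integration with an oversized payload.

Builds a conversation of 25 long messages (~1,000 characters each, ~25 KB
total) plus a short summary request, sends it through
``chat.completions.create``, and uses Sentry's debug output to inspect how
the large prompt data is truncated on the resulting spans.
"""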
import os

import dotenv
from openai import OpenAI

import sentry_sdk
from sentry_sdk.ai.monitoring import ai_track
from sentry_sdk.integrations.openai import OpenAIIntegration
from sentry_sdk.integrations.stdlib import StdlibIntegration

# Load OPENAI_API_KEY, SENTRY_DSN, etc. from a local .env file, if present.
dotenv.load_dotenv()


def generate_long_message(index):
    """Return a deterministic message of exactly 1,000 characters."""
    base_message = f"Message number {index}: "
    filler = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. "

    # Pad with filler until the message reaches 1,000 characters, then
    # truncate so every message has a predictable, uniform size.
    message = base_message
    while len(message) < 1000:
        message += filler

    return message[:1000]


@ai_track("My truncation test workflow")
def my_truncation_workflow(client):
    with sentry_sdk.start_transaction(name="openai-truncation-test"):
        # Alternate user/assistant turns to simulate a long conversation.
        messages = []
        for i in range(25):
            role = "user" if i % 2 == 0 else "assistant"
            messages.append({
                "role": role,
                "content": generate_long_message(i),
            })

        messages.append({
            "role": "user",
            "content": "Please summarize our conversation so far in one sentence.",
        })

        print(f"Total messages: {len(messages)}")
        total_chars = sum(len(msg["content"]) for msg in messages)
        print(f"Total characters in messages: {total_chars}")
        print(f"Approximate size in KB: {total_chars / 1024:.2f}")

        # max_tokens keeps the reply short; the large *request* payload is
        # what this test is about.
        response = client.chat.completions.create(
            messages=messages,
            model="gpt-4o-mini",
            max_tokens=100,
            temperature=0.7,
        )

        print("--------------------------------")
        print("Response:")
        print(response.model_dump())
        print("--------------------------------")
        print("Assistant reply:")
        print(response.choices[0].message.content)


def main():
    sentry_sdk.init(
        dsn=os.getenv("SENTRY_DSN"),
        environment=os.getenv("ENV", "openai-test-truncation"),
        traces_sample_rate=1.0,
        profiles_sample_rate=1.0,
        # Prompts and responses are only recorded when send_default_pii=True
        # (together with include_prompts=True on the integration below).
        send_default_pii=True,
        debug=True,
        integrations=[
            OpenAIIntegration(
                include_prompts=True,
                # Encoding used by the SDK (via tiktoken) to count tokens.
                tiktoken_encoding_name="cl100k_base",
            ),
        ],
        disabled_integrations=[
            # Skip stdlib HTTP instrumentation to keep the trace focused
            # on the OpenAI spans.
            StdlibIntegration(),
        ],
    )

    client = OpenAI(
        api_key=os.environ.get("OPENAI_API_KEY"),
    )

    my_truncation_workflow(client)

    print("--------------------------------")
    print("Done!")


if __name__ == "__main__":
    main()