diff --git a/apps/docs/content/docs/en/execution/api.mdx b/apps/docs/content/docs/en/execution/api.mdx index 61166941fb4..5e8e2ea07ca 100644 --- a/apps/docs/content/docs/en/execution/api.mdx +++ b/apps/docs/content/docs/en/execution/api.mdx @@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout' import { Tab, Tabs } from 'fumadocs-ui/components/tabs' import { Video } from '@/components/ui/video' -Sim provides a comprehensive external API for querying workflow execution logs and setting up webhooks for real-time notifications when workflows complete. +Sim provides a comprehensive external API for querying workflow run logs and setting up webhooks for real-time notifications when workflows complete. ## Authentication @@ -21,7 +21,7 @@ You can generate API keys from the Sim platform and navigate to **Settings**, th ## Logs API -All API responses include information about your workflow execution limits and usage: +All API responses include information about your workflow run limits and usage: ```json "limits": { @@ -48,11 +48,11 @@ All API responses include information about your workflow execution limits and u } ``` -**Note:** Rate limits use a token bucket algorithm. `remaining` can exceed `requestsPerMinute` up to `maxBurst` when you haven't used your full allowance recently, allowing for burst traffic. The rate limits in the response body are for workflow executions. The rate limits for calling this API endpoint are in the response headers (`X-RateLimit-*`). +**Note:** Rate limits use a token bucket algorithm. `remaining` can exceed `requestsPerMinute` up to `maxBurst` when you haven't used your full allowance recently, allowing for burst traffic. The rate limits in the response body are for workflow runs. The rate limits for calling this API endpoint are in the response headers (`X-RateLimit-*`). ### Query Logs -Query workflow execution logs with extensive filtering options. +Query workflow run logs with extensive filtering options. 
@@ -70,11 +70,11 @@ Query workflow execution logs with extensive filtering options. - `level` - Filter by level: `info`, `error` - `startDate` - ISO timestamp for date range start - `endDate` - ISO timestamp for date range end - - `executionId` - Exact execution ID match - - `minDurationMs` - Minimum execution duration in milliseconds - - `maxDurationMs` - Maximum execution duration in milliseconds - - `minCost` - Minimum execution cost - - `maxCost` - Maximum execution cost + - `executionId` - Exact run ID match + - `minDurationMs` - Minimum run duration in milliseconds + - `maxDurationMs` - Maximum run duration in milliseconds + - `minCost` - Minimum run cost + - `maxCost` - Maximum run cost - `model` - Filter by AI model used **Pagination:** @@ -213,9 +213,9 @@ Retrieve detailed information about a specific log entry. -### Get Execution Details +### Get Run Details -Retrieve execution details including the workflow state snapshot. +Retrieve run details including the workflow state snapshot. @@ -248,7 +248,7 @@ Retrieve execution details including the workflow state snapshot. ## Notifications -Get real-time notifications when workflow executions complete via webhook, email, or Slack. Notifications are configured at the workspace level from the Logs page. +Get real-time notifications when workflow runs complete via webhook, email, or Slack. Notifications are configured at the workspace level from the Logs page. 
### Configuration @@ -256,7 +256,7 @@ Configure notifications from the Logs page by clicking the menu button and selec **Notification Channels:** - **Webhook**: Send HTTP POST requests to your endpoint -- **Email**: Receive email notifications with execution details +- **Email**: Receive email notifications with run details - **Slack**: Post messages to a Slack channel **Workflow Selection:** @@ -269,38 +269,38 @@ Configure notifications from the Logs page by clicking the menu button and selec **Optional Data:** - `includeFinalOutput`: Include the workflow's final output -- `includeTraceSpans`: Include detailed execution trace spans +- `includeTraceSpans`: Include detailed trace spans - `includeRateLimits`: Include rate limit information (sync/async limits and remaining) - `includeUsageData`: Include billing period usage and limits ### Alert Rules -Instead of receiving notifications for every execution, configure alert rules to be notified only when issues are detected: +Instead of receiving notifications for every run, configure alert rules to be notified only when issues are detected: **Consecutive Failures** -- Alert after X consecutive failed executions (e.g., 3 failures in a row) -- Resets when an execution succeeds +- Alert after X consecutive failed runs (e.g., 3 failures in a row) +- Resets when a run succeeds **Failure Rate** - Alert when failure rate exceeds X% over the last Y hours -- Requires minimum 5 executions in the window +- Requires minimum 5 runs in the window - Only triggers after the full time window has elapsed **Latency Threshold** -- Alert when any execution takes longer than X seconds +- Alert when any run takes longer than X seconds - Useful for catching slow or hanging workflows **Latency Spike** -- Alert when execution is X% slower than the average +- Alert when a run is X% slower than the average - Compares against the average duration over the configured time window -- Requires minimum 5 executions to establish baseline +- Requires 
minimum 5 runs to establish baseline **Cost Threshold** -- Alert when a single execution costs more than $X +- Alert when a single run costs more than $X - Useful for catching expensive LLM calls **No Activity** -- Alert when no executions occur within X hours +- Alert when no runs occur within X hours - Useful for monitoring scheduled workflows that should run regularly **Error Count** @@ -317,7 +317,7 @@ For webhooks, additional options are available: ### Payload Structure -When a workflow execution completes, Sim sends the following payload (via webhook POST, email, or Slack): +When a workflow run completes, Sim sends the following payload (via webhook POST, email, or Slack): ```json { @@ -456,7 +456,7 @@ Failed webhook deliveries are retried with exponential backoff and jitter: - Deliveries timeout after 30 seconds - Webhook deliveries are processed asynchronously and don't affect workflow execution performance. + Webhook deliveries are processed asynchronously and don't affect workflow run performance. ## Best Practices @@ -596,11 +596,11 @@ app.listen(3000, () => { import { FAQ } from '@/components/ui/faq' Sim Keys in the platform. Workflows with public API access enabled can also be called without authentication." }, { question: "How does the webhook retry policy work?", answer: "Failed webhook deliveries are retried up to 5 times with exponential backoff: 5 seconds, 15 seconds, 1 minute, 3 minutes, and 10 minutes, plus up to 10% jitter. Only HTTP 5xx and 429 responses trigger retries. Each delivery times out after 30 seconds." }, - { question: "What rate limits apply to the Logs API?", answer: "Rate limits use a token bucket algorithm. Free plans get 30 requests/minute with 60 burst capacity, Pro gets 100/200, Team gets 200/400, and Enterprise gets 500/1000. These are separate from workflow execution rate limits, which are shown in the response body." 
}, + { question: "What rate limits apply to the Logs API?", answer: "Rate limits use a token bucket algorithm. Free plans get 30 requests/minute with 60 burst capacity, Pro gets 100/200, Team gets 200/400, and Enterprise gets 500/1000. These are separate from workflow run rate limits, which are shown in the response body." }, { question: "How do I verify that a webhook is from Sim?", answer: "Configure a webhook secret when setting up notifications. Sim signs each delivery with HMAC-SHA256 using the format 't={timestamp},v1={signature}' in the sim-signature header. Compute the HMAC of '{timestamp}.{body}' with your secret and compare it to the signature value." }, { question: "What alert rules are available for notifications?", answer: "You can configure alerts for consecutive failures, failure rate thresholds, latency thresholds, latency spikes (percentage above average), cost thresholds, no-activity periods, and error counts within a time window. All alert types include a 1-hour cooldown to prevent notification spam." }, - { question: "Can I filter which executions trigger notifications?", answer: "Yes. You can filter notifications by specific workflows (or select all), log level (info or error), and trigger type (api, webhook, schedule, manual, chat). You can also choose whether to include final output, trace spans, rate limits, and usage data in the notification payload." }, + { question: "Can I filter which runs trigger notifications?", answer: "Yes. You can filter notifications by specific workflows (or select all), log level (info or error), and trigger type (api, webhook, schedule, manual, chat). You can also choose whether to include final output, trace spans, rate limits, and usage data in the notification payload." 
}, ]} /> diff --git a/apps/docs/content/docs/en/execution/basics.mdx b/apps/docs/content/docs/en/execution/basics.mdx index 1777b7fdcfb..60dc79cbd33 100644 --- a/apps/docs/content/docs/en/execution/basics.mdx +++ b/apps/docs/content/docs/en/execution/basics.mdx @@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout' import { Card, Cards } from 'fumadocs-ui/components/card' import { Image } from '@/components/ui/image' -Understanding how workflows execute in Sim is key to building efficient and reliable automations. The execution engine automatically handles dependencies, concurrency, and data flow to ensure your workflows run smoothly and predictably. +Understanding how workflows run in Sim is key to building efficient and reliable automations. The execution engine automatically handles dependencies, concurrency, and data flow to ensure your workflows run smoothly and predictably. ## How Workflows Execute @@ -14,7 +14,7 @@ Sim's execution engine processes workflows intelligently by analyzing dependenci ### Concurrent Execution by Default -Multiple blocks run concurrently when they don't depend on each other. This parallel execution dramatically improves performance without requiring manual configuration. +Multiple blocks run concurrently when they don't depend on each other. This dramatically improves performance without requiring manual configuration. -This workflow demonstrates how execution can follow different paths based on conditions or AI decisions, with each path executing independently. +This workflow demonstrates how a run can follow different paths based on conditions or AI decisions, with each path running independently. ## Block Types @@ -57,7 +57,7 @@ Sim provides different types of blocks that serve specific purposes in your work - **Starter blocks** initiate workflows and **Webhook blocks** respond to external events. Every workflow needs a trigger to begin execution. 
+ **Starter blocks** initiate workflows and **Webhook blocks** respond to external events. Every workflow needs a trigger to begin a run. @@ -73,37 +73,37 @@ Sim provides different types of blocks that serve specific purposes in your work -All blocks execute automatically based on their dependencies - you don't need to manually manage execution order or timing. +All blocks run automatically based on their dependencies - you don't need to manually manage run order or timing. -## Execution Monitoring +## Run Monitoring -When workflows run, Sim provides real-time visibility into the execution process: +When workflows run, Sim provides real-time visibility into the process: -- **Live Block States**: See which blocks are currently executing, completed, or failed -- **Execution Logs**: Detailed logs appear in real-time showing inputs, outputs, and any errors -- **Performance Metrics**: Track execution time and costs for each block -- **Path Visualization**: Understand which execution paths were taken through your workflow +- **Live Block States**: See which blocks are currently running, completed, or failed +- **Run Logs**: Detailed logs appear in real-time showing inputs, outputs, and any errors +- **Performance Metrics**: Track run time and costs for each block +- **Path Visualization**: Understand which paths were taken through your workflow - All execution details are captured and available for review even after workflows complete, helping with debugging and optimization. + All run details are captured and available for review even after workflows complete, helping with debugging and optimization. -## Key Execution Principles +## Key Principles Understanding these core principles will help you build better workflows: 1. **Dependency-Based Execution**: Blocks only run when all their dependencies have completed 2. **Automatic Parallelization**: Independent blocks run concurrently without configuration 3. 
**Smart Data Flow**: Outputs flow automatically to connected blocks -4. **Error Handling**: Failed blocks stop their execution path but don't affect independent paths -5. **Response Blocks as Exit Points**: When a Response block executes, the entire workflow stops and the API response is sent immediately. Multiple Response blocks can exist on different branches — the first one to execute wins -6. **State Persistence**: All block outputs and execution details are preserved for debugging -7. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. If the chain exceeds 25 hops, execution is stopped to prevent infinite loops +4. **Error Handling**: Failed blocks stop their run path but don't affect independent paths +5. **Response Blocks as Exit Points**: When a Response block runs, the entire workflow stops and the API response is sent immediately. Multiple Response blocks can exist on different branches — the first one to run wins +6. **State Persistence**: All block outputs and run details are preserved for debugging +7. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. 
If the chain exceeds 25 hops, the run is stopped to prevent infinite loops ## Next Steps Now that you understand execution basics, explore: - **[Block Types](/blocks)** - Learn about specific block capabilities -- **[Logging](/execution/logging)** - Monitor workflow executions and debug issues +- **[Logging](/execution/logging)** - Monitor workflow runs and debug issues - **[Cost Calculation](/execution/costs)** - Understand and optimize workflow costs - **[Triggers](/triggers)** - Set up different ways to run your workflows diff --git a/apps/docs/content/docs/en/execution/costs.mdx b/apps/docs/content/docs/en/execution/costs.mdx index 13b23f4ce78..a2391a0b83a 100644 --- a/apps/docs/content/docs/en/execution/costs.mdx +++ b/apps/docs/content/docs/en/execution/costs.mdx @@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout' import { Tab, Tabs } from 'fumadocs-ui/components/tabs' import { Image } from '@/components/ui/image' -Sim automatically calculates costs for all workflow executions, providing transparent pricing based on AI model usage and execution charges. Understanding these costs helps you optimize workflows and manage your budget effectively. +Sim automatically calculates costs for all workflow runs, providing transparent pricing based on AI model usage and run charges. Understanding these costs helps you optimize workflows and manage your budget effectively. 
## Credits @@ -16,18 +16,18 @@ All plan limits, usage meters, and billing thresholds are displayed in credits t ## How Costs Are Calculated -Every workflow execution includes two cost components: +Every workflow run includes two cost components: -**Base Execution Charge**: 1 credit ($0.005) per execution +**Base Run Charge**: 1 credit ($0.005) per run **AI Model Usage**: Variable cost based on token consumption ```javascript modelCost = (inputTokens × inputPrice + outputTokens × outputPrice) / 1,000,000 -totalCredits = baseExecutionCharge + modelCost × 200 +totalCredits = baseRunCharge + modelCost × 200 ``` - AI model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. Workflows without AI blocks only incur the base execution charge. + AI model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. Workflows without AI blocks only incur the base run charge. ## Model Breakdown in Logs @@ -48,7 +48,7 @@ The model breakdown shows: - **Token Usage**: Input and output token counts for each model - **Cost Breakdown**: Individual costs per model and operation - **Model Distribution**: Which models were used and how many times -- **Total Cost**: Aggregate cost for the entire workflow execution +- **Total Cost**: Aggregate cost for the entire workflow run ## Pricing Options @@ -330,18 +330,18 @@ Max (individual) shares the same rate limits as team plans. Team plans (Pro or M Team plans (Pro or Max for Teams) use 500 GB. -### Execution Time Limits +### Run Time Limits | Plan | Sync | Async | |------|------|-------| | **Free** | 5 minutes | 90 minutes | | **Pro / Max / Team / Enterprise** | 50 minutes | 90 minutes | -**Sync executions** run immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI. -**Async executions** (triggered via API with `async: true`, webhooks, or schedules) run in the background. 
+**Sync runs** start immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI. +**Async runs** (triggered via API with `async: true`, webhooks, or schedules) run in the background. - If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async execution or break them into smaller workflows. + If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async runs or break them into smaller workflows. ## Billing Model @@ -452,18 +452,18 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt ## Next Steps - Review your current usage in [Settings → Subscription](https://sim.ai/settings/subscription) -- Learn about [Logging](/execution/logging) to track execution details +- Learn about [Logging](/execution/logging) to track run details - Explore the [External API](/execution/api) for programmatic cost monitoring - Check out [workflow optimization techniques](/blocks) to reduce costs import { FAQ } from '@/components/ui/faq' diff --git a/apps/docs/content/docs/en/execution/files.mdx b/apps/docs/content/docs/en/execution/files.mdx index 4a7093c9b43..41c04692c4f 100644 --- a/apps/docs/content/docs/en/execution/files.mdx +++ b/apps/docs/content/docs/en/execution/files.mdx @@ -156,7 +156,7 @@ Use `url` for direct downloads or `base64` for inline processing. - **Dropbox** - Dropbox file operations - Files are automatically available to downstream blocks. The execution engine handles all file transfer and format conversion. + Files are automatically available to downstream blocks. The engine handles all file transfer and format conversion. ## Best Practices @@ -165,15 +165,15 @@ Use `url` for direct downloads or `base64` for inline processing. 2. 
**Check file types** - Ensure the file type matches what the receiving block expects. The Vision block needs images, the File block handles documents. -3. **Consider file size** - Large files increase execution time. For very large files, consider using storage blocks (S3, Supabase) for intermediate storage. +3. **Consider file size** - Large files increase run time. For very large files, consider using storage blocks (S3, Supabase) for intermediate storage. import { FAQ } from '@/components/ui/faq' ) and the receiving block will extract the data it needs." }, - { question: "How do file fields work in the Start block's input format?", answer: "When you define a field with type 'file[]' in the Start block's input format, the execution engine automatically processes incoming file data (base64 or URL) and uploads it to storage, converting it into UserFile objects before the workflow runs." }, + { question: "How do file fields work in the Start block's input format?", answer: "When you define a field with type 'file[]' in the Start block's input format, the engine automatically processes incoming file data (base64 or URL) and uploads it to storage, converting it into UserFile objects before the workflow runs." }, ]} /> diff --git a/apps/docs/content/docs/en/execution/index.mdx b/apps/docs/content/docs/en/execution/index.mdx index cc80425ecfb..7b677473137 100644 --- a/apps/docs/content/docs/en/execution/index.mdx +++ b/apps/docs/content/docs/en/execution/index.mdx @@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card' import { Image } from '@/components/ui/image' import { FAQ } from '@/components/ui/faq' -Sim's execution engine brings your workflows to life by processing blocks in the correct order, managing data flow, and handling errors gracefully, so you can understand exactly how workflows are executed in Sim. 
+Sim's execution engine brings your workflows to life by processing blocks in the correct order, managing data flow, and handling errors gracefully, so you can understand exactly how workflows run in Sim. - Every workflow execution follows a deterministic path based on your block connections and logic, ensuring predictable and reliable results. + Every workflow run follows a deterministic path based on your block connections and logic, ensuring predictable and reliable results. ## Documentation Overview @@ -22,33 +22,33 @@ Sim's execution engine brings your workflows to life by processing blocks in the - Monitor workflow executions with comprehensive logging and real-time visibility + Monitor workflow runs with comprehensive logging and real-time visibility - + - Understand how workflow execution costs are calculated and optimized + Understand how workflow run costs are calculated and optimized - + - Access execution logs and set up webhooks programmatically via REST API + Access run logs and set up webhooks programmatically via REST API ## Key Concepts ### Topological Execution -Blocks execute in dependency order, similar to how a spreadsheet recalculates cells. The execution engine automatically determines which blocks can run based on completed dependencies. +Blocks run in dependency order, similar to how a spreadsheet recalculates cells. The execution engine automatically determines which blocks can run based on completed dependencies. ### Path Tracking -The engine actively tracks execution paths through your workflow. Router and Condition blocks dynamically update these paths, ensuring only relevant blocks execute. +The engine actively tracks run paths through your workflow. Router and Condition blocks dynamically update these paths, ensuring only relevant blocks run. ### Layer-Based Processing Instead of executing blocks one-by-one, the engine identifies layers of blocks that can run in parallel, optimizing performance for complex workflows. 
-### Execution Context -Each workflow maintains a rich context during execution containing: +### Run Context +Each workflow maintains a rich context during a run containing: - Block outputs and states -- Active execution paths +- Active run paths - Loop and parallel iteration tracking - Environment variables - Routing decisions @@ -56,7 +56,7 @@ Each workflow maintains a rich context during execution containing: ## Deployment Snapshots -API, Chat, Schedule, and Webhook executions run against the workflow’s active deployment snapshot. Manual runs from the editor execute the current draft canvas state, letting you test changes before deploying. Publish a new deployment whenever you change the canvas so every trigger uses the updated version. +API, Chat, Schedule, and Webhook runs use the workflow’s active deployment snapshot. Manual runs from the editor use the current draft canvas state, letting you test changes before deploying. Publish a new deployment whenever you change the canvas so every trigger uses the updated version.
 diff --git a/apps/docs/content/docs/en/execution/logging.mdx b/apps/docs/content/docs/en/execution/logging.mdx index 12376de01aa..dbbf50a3835 100644 --- a/apps/docs/content/docs/en/execution/logging.mdx +++ b/apps/docs/content/docs/en/execution/logging.mdx @@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout' import { Tab, Tabs } from 'fumadocs-ui/components/tabs' import { Image } from '@/components/ui/image' -Sim provides comprehensive logging for all workflow executions, giving you complete visibility into how your workflows run, what data flows through them, and where issues might occur. +Sim provides comprehensive logging for all workflow runs, giving you complete visibility into how they run, what data flows through them, and where issues might occur. ## Logging System @@ -14,7 +14,7 @@ Sim offers two complementary logging interfaces to match different workflows and ### Real-Time Console -During manual or chat workflow execution, logs appear in real-time in the Console panel on the right side of the workflow editor: +During manual or chat workflow runs, logs appear in real-time in the Console panel on the right side of the workflow editor:
The console shows: -- Block execution progress with active block highlighting +- Block progress with active block highlighting - Real-time outputs as blocks complete -- Execution timing for each block +- Timing for each block - Success/error status indicators ### Logs Page -All workflow executions—whether triggered manually, via API, Chat, Schedule, or Webhook—are logged to the dedicated Logs page: +All workflow runs—whether triggered manually, via API, Chat, Schedule, or Webhook—are logged to the dedicated Logs page:
- **Output Tab** shows the block's execution result: + **Output Tab** shows the block's result: - Structured data with JSON formatting - Markdown rendering for AI-generated content - Copy button for easy data extraction @@ -87,17 +87,17 @@ View the complete data flow for each block with tabs to switch between: -### Execution Timeline +### Run Timeline -For workflow-level logs, view detailed execution metrics: +For workflow-level logs, view detailed run metrics: - Start and end timestamps - Total workflow duration -- Individual block execution times +- Individual block run times - Performance bottleneck identification ## Workflow Snapshots -For any logged execution, click "View Snapshot" to see the exact workflow state at execution time: +For any logged run, click "View Snapshot" to see the exact workflow state at the time of the run:
- Workflow snapshots are only available for executions after the enhanced logging system was introduced. Older migrated logs show a "Logged State Not Found" message. + Workflow snapshots are only available for runs after the enhanced logging system was introduced. Older migrated logs show a "Logged State Not Found" message. ## Log Retention @@ -134,11 +134,11 @@ The snapshot provides: ### For Production - Monitor the Logs page regularly for errors or performance issues - Set up filters to focus on specific workflows or time periods -- Use live mode during critical deployments to watch executions in real-time +- Use live mode during critical deployments to watch runs in real-time ### For Debugging -- Always check the execution timeline to identify slow blocks -- Compare inputs between working and failing executions +- Always check the run timeline to identify slow blocks +- Compare inputs between working and failing runs - Use workflow snapshots to see the exact state when issues occurred ## Next Steps @@ -150,10 +150,10 @@ The snapshot provides: import { FAQ } from '@/components/ui/faq' \ No newline at end of file diff --git a/apps/docs/content/docs/en/tools/jira.mdx b/apps/docs/content/docs/en/tools/jira.mdx index db433a3d4d9..17742deeda2 100644 --- a/apps/docs/content/docs/en/tools/jira.mdx +++ b/apps/docs/content/docs/en/tools/jira.mdx @@ -251,7 +251,7 @@ Update a Jira issue | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `issueKey` | string | Yes | Jira issue key to update \(e.g., PROJ-123\) | | `summary` | string | No | New summary for the issue | -| `description` | string | No | New description for the issue | +| `description` | string | No | New description for the issue. 
Accepts plain text \(auto-wrapped in ADF\) or a raw ADF document object | | `priority` | string | No | New priority ID or name for the issue \(e.g., "High"\) | | `assignee` | string | No | New assignee account ID for the issue | | `labels` | json | No | Labels to set on the issue \(array of label name strings\) | @@ -284,7 +284,7 @@ Create a new Jira issue | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `projectId` | string | Yes | Jira project key \(e.g., PROJ\) | | `summary` | string | Yes | Summary for the issue | -| `description` | string | No | Description for the issue | +| `description` | string | No | Description for the issue. Accepts plain text \(auto-wrapped in ADF\) or a raw ADF document object | | `priority` | string | No | Priority ID or name for the issue \(e.g., "10000" or "High"\) | | `assignee` | string | No | Assignee account ID for the issue | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. | diff --git a/apps/docs/content/docs/en/tools/microsoft_excel.mdx b/apps/docs/content/docs/en/tools/microsoft_excel.mdx index 4da61a5b9cd..08733e5eb30 100644 --- a/apps/docs/content/docs/en/tools/microsoft_excel.mdx +++ b/apps/docs/content/docs/en/tools/microsoft_excel.mdx @@ -45,6 +45,7 @@ Read data from a specific sheet in a Microsoft Excel spreadsheet | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | | `spreadsheetId` | string | Yes | The ID of the spreadsheet/workbook to read from \(e.g., "01ABC123DEF456"\) | +| `driveId` | string | No | The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive. | | `range` | string | No | The range of cells to read from. Accepts "SheetName!A1:B2" for explicit ranges or just "SheetName" to read the used range of that sheet. If omitted, reads the used range of the first sheet. 
| #### Output @@ -67,6 +68,7 @@ Write data to a specific sheet in a Microsoft Excel spreadsheet | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | | `spreadsheetId` | string | Yes | The ID of the spreadsheet/workbook to write to \(e.g., "01ABC123DEF456"\) | +| `driveId` | string | No | The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive. | | `range` | string | No | The range of cells to write to \(e.g., "Sheet1!A1:B2"\) | | `values` | array | Yes | The data to write as a 2D array \(e.g., \[\["Name", "Age"\], \["Alice", 30\]\]\) or array of objects | | `valueInputOption` | string | No | The format of the data to write | diff --git a/apps/docs/content/docs/en/triggers/index.mdx b/apps/docs/content/docs/en/triggers/index.mdx index 29279243cf5..99369685055 100644 --- a/apps/docs/content/docs/en/triggers/index.mdx +++ b/apps/docs/content/docs/en/triggers/index.mdx @@ -29,7 +29,7 @@ Use the Start block for everything originating from the editor, deploy-to-API, o Receive external webhook payloads - Cron or interval based execution + Cron or interval based runs Monitor RSS and Atom feeds for new content @@ -59,17 +59,17 @@ Use the Start block for everything originating from the editor, deploy-to-API, o > Deployments power every trigger. Update the workflow, redeploy, and all trigger entry points pick up the new snapshot. Learn more in [Execution → Deployment Snapshots](/execution). -## Manual Execution Priority +## Manual Run Priority -When you click **Run** in the editor, Sim automatically selects which trigger to execute based on the following priority order: +When you click **Run** in the editor, Sim automatically selects which trigger to run based on the following priority order: 1. **Start Block** (highest priority) 2. **Schedule Triggers** 3. **External Triggers** (webhooks, integrations like Slack, Gmail, Airtable, etc.) 
-If your workflow has multiple triggers, the highest priority trigger will be executed. For example, if you have both a Start block and a Webhook trigger, clicking Run will execute the Start block. +If your workflow has multiple triggers, the highest priority trigger will be used. For example, if you have both a Start block and a Webhook trigger, clicking Run will use the Start block. -**External triggers with mock payloads**: When external triggers (webhooks and integrations) are executed manually, Sim automatically generates mock payloads based on the trigger's expected data structure. This ensures downstream blocks can resolve variables correctly during testing. +**External triggers with mock payloads**: When external triggers (webhooks and integrations) are run manually, Sim automatically generates mock payloads based on the trigger's expected data structure. This ensures downstream blocks can resolve variables correctly during testing. ## Email Polling Groups @@ -94,10 +94,10 @@ Invitees receive an email with a link to connect their account. Once connected, When configuring an email trigger, select your polling group from the credentials dropdown instead of an individual account. The system creates webhooks for each member and routes all emails through your workflow. diff --git a/apps/docs/content/docs/en/triggers/rss.mdx b/apps/docs/content/docs/en/triggers/rss.mdx index 3f15c384d70..93f86d35878 100644 --- a/apps/docs/content/docs/en/triggers/rss.mdx +++ b/apps/docs/content/docs/en/triggers/rss.mdx @@ -51,9 +51,9 @@ RSS triggers only fire for items published after you save the trigger. 
Existing diff --git a/apps/docs/content/docs/en/triggers/schedule.mdx b/apps/docs/content/docs/en/triggers/schedule.mdx index 2bd193191c5..8c59882ceb2 100644 --- a/apps/docs/content/docs/en/triggers/schedule.mdx +++ b/apps/docs/content/docs/en/triggers/schedule.mdx @@ -79,10 +79,10 @@ Schedule blocks cannot receive incoming connections and serve as workflow entry diff --git a/apps/docs/content/docs/en/triggers/start.mdx b/apps/docs/content/docs/en/triggers/start.mdx index 8997372e8eb..672da65024a 100644 --- a/apps/docs/content/docs/en/triggers/start.mdx +++ b/apps/docs/content/docs/en/triggers/start.mdx @@ -19,12 +19,12 @@ The Start block is the default trigger for workflows built in Sim. It collects s
-The Start block sits in the start slot when you create a workflow. Keep it there when you want the same entry point to serve editor runs, deploy-to-API requests, and chat sessions. Swap it with Webhook or Schedule triggers when you only need event-driven execution. +The Start block sits in the start slot when you create a workflow. Keep it there when you want the same entry point to serve editor runs, deploy-to-API requests, and chat sessions. Swap it with Webhook or Schedule triggers when you only need event-driven runs. ## Fields exposed by Start -The Start block emits different data depending on the execution surface: +The Start block emits different data depending on the run surface: - **Input Format fields** — Every field you add becomes available as <start.fieldName>. For example, a `customerId` field shows up as <start.customerId> in downstream blocks and templates. - **Chat-only fields** — When the workflow runs from the chat side panel or a deployed chat experience, Sim also provides <start.input> (latest user message), <start.conversationId> (active session id), and <start.files> (chat attachments). @@ -33,11 +33,11 @@ Keep Input Format fields scoped to the names you expect to reference later—tho ## Configure the Input Format -Use the Input Format sub-block to define the schema that applies across execution modes: +Use the Input Format sub-block to define the schema that applies across run modes: 1. Add a field for each value you want to collect. 2. Choose a type (`string`, `number`, `boolean`, `object`, `array`, or `files`). File fields accept uploads from chat and API callers. -3. Provide default values when you want the manual run modal to populate test data automatically. These defaults are ignored for deployed executions. +3. Provide default values when you want the manual run modal to populate test data automatically. These defaults are ignored for deployed runs. 4. Reorder fields to control how they appear in the editor form. 
Reference structured values downstream with expressions such as <start.customerId> depending on the block you connect. @@ -53,7 +53,7 @@ Reference structured values downstream with expressions such as <start. tools or storage steps. - Deploying to API turns the Input Format into a JSON contract for clients. Each field becomes part of the request body, and Sim coerces primitive types on ingestion. File fields expect objects that reference uploaded files; use the execution file upload endpoint before invoking the workflow. + Deploying to API turns the Input Format into a JSON contract for clients. Each field becomes part of the request body, and Sim coerces primitive types on ingestion. File fields expect objects that reference uploaded files; use the file upload endpoint before invoking the workflow. API callers can include additional optional properties. They are preserved inside <start.fieldName> outputs so you can experiment diff --git a/apps/docs/content/docs/en/triggers/webhook.mdx b/apps/docs/content/docs/en/triggers/webhook.mdx index d897db7681b..33d3a6dd952 100644 --- a/apps/docs/content/docs/en/triggers/webhook.mdx +++ b/apps/docs/content/docs/en/triggers/webhook.mdx @@ -8,7 +8,7 @@ import { Image } from '@/components/ui/image' import { Video } from '@/components/ui/video' import { FAQ } from '@/components/ui/faq' -Webhooks allow external services to trigger workflow execution by sending HTTP requests to your workflow. Sim supports two approaches for webhook-based triggers. +Webhooks allow external services to trigger workflow runs by sending HTTP requests to your workflow. Sim supports two approaches for webhook-based triggers. ## Generic Webhook Trigger @@ -30,7 +30,7 @@ The Generic Webhook block creates a flexible endpoint that can receive any paylo 2. **Configure Payload** - Set up the expected payload structure (optional) 3. **Get Webhook URL** - Copy the automatically generated unique endpoint 4. 
**External Integration** - Configure your external service to send POST requests to this URL -5. **Workflow Execution** - Every request to the webhook URL triggers the workflow +5. **Workflow Run** - Every request to the webhook URL triggers the workflow ### Features @@ -38,7 +38,7 @@ The Generic Webhook block creates a flexible endpoint that can receive any paylo - **Automatic Parsing**: Webhook data is automatically parsed and available to subsequent blocks - **Authentication**: Optional bearer token or custom header authentication - **Rate Limiting**: Built-in protection against abuse -- **Deduplication**: Prevents duplicate executions from repeated requests +- **Deduplication**: Prevents duplicate runs from repeated requests The Generic Webhook trigger fires every time the webhook URL receives a request, making it perfect for real-time integrations. @@ -58,7 +58,7 @@ Alternatively, you can use specific service blocks (like Slack, GitHub, etc.) in 2. **Enable Trigger Mode** - Toggle "Use as Trigger" in the block settings 3. **Configure Service** - Set up authentication and event filters specific to that service 4. **Webhook Registration** - The service automatically registers the webhook with the external platform -5. **Event-Based Execution** - Workflow triggers only for specific events from that service +5. **Event-Based Runs** - Workflow triggers only for specific events from that service ### When to Use Each Approach @@ -120,7 +120,7 @@ Alternatively, you can use specific service blocks (like Slack, GitHub, etc.) in ### Testing Webhooks 1. Use tools like Postman or curl to test your webhook endpoints -2. Check workflow execution logs for debugging +2. Check workflow run logs for debugging 3. Verify payload structure matches your expectations 4. 
Test authentication and error scenarios @@ -153,8 +153,8 @@ Always validate and sanitize incoming webhook data before processing it in your { question: "What HTTP methods does the Generic Webhook endpoint accept?", answer: "The webhook endpoint handles POST requests for triggering workflows. GET requests are only used for provider-specific verification challenges (such as Microsoft Graph or WhatsApp verification). Other methods return a 405 Method Not Allowed response." }, { question: "How do I authenticate webhook requests?", answer: "Enable the Require Authentication toggle in the webhook configuration, then set an Authentication Token. Callers can send the token as a Bearer token in the Authorization header, or you can specify a custom header name (e.g., X-Secret-Key) and the token will be matched against that header instead." }, { question: "Can I define the expected payload structure for a webhook?", answer: "Yes. The Generic Webhook block includes an Input Format field where you can define the expected JSON schema. This is optional but helps document the expected structure. You can also use type \"file[]\" for file upload fields." }, - { question: "Does the webhook have deduplication built in?", answer: "Yes. The webhook processing pipeline includes idempotency checks to prevent duplicate executions from repeated requests with the same payload." }, + { question: "Does the webhook have deduplication built in?", answer: "Yes. The webhook processing pipeline includes idempotency checks to prevent duplicate runs from repeated requests with the same payload." }, { question: "What data from the webhook request is available in my workflow?", answer: "All request data including headers, body, and query parameters is parsed and made available to subsequent blocks. Common fields like event, id, and data are automatically extracted from the payload when present." }, - { question: "Do I need to deploy my workflow for the webhook URL to work?", answer: "Yes. 
The webhook endpoint checks that the associated workflow is deployed before triggering execution. If the workflow is not deployed, the webhook returns a not-found response." }, - { question: "Does the webhook auto-disable after repeated failures?", answer: "No. Unlike polling-based triggers (RSS, Gmail, IMAP), push-based generic webhooks do not auto-disable after consecutive failures. Each incoming request is processed independently. If your workflow consistently fails, check the execution logs for error details." }, + { question: "Do I need to deploy my workflow for the webhook URL to work?", answer: "Yes. The webhook endpoint checks that the associated workflow is deployed before triggering a run. If the workflow is not deployed, the webhook returns a not-found response." }, + { question: "Does the webhook auto-disable after repeated failures?", answer: "No. Unlike polling-based triggers (RSS, Gmail, IMAP), push-based generic webhooks do not auto-disable after consecutive failures. Each incoming request is processed independently. If your workflow consistently fails, check the run logs for error details." 
}, ]} /> diff --git a/apps/sim/app/(landing)/blog/authors/[id]/page.tsx b/apps/sim/app/(landing)/blog/authors/[id]/page.tsx index 3362e3ee917..bbbc8a55913 100644 --- a/apps/sim/app/(landing)/blog/authors/[id]/page.tsx +++ b/apps/sim/app/(landing)/blog/authors/[id]/page.tsx @@ -2,6 +2,7 @@ import type { Metadata } from 'next' import Image from 'next/image' import Link from 'next/link' import { getAllPostMeta } from '@/lib/blog/registry' +import { SITE_URL } from '@/lib/core/utils/urls' export const revalidate = 3600 @@ -17,11 +18,11 @@ export async function generateMetadata({ return { title: `${name} — Sim Blog`, description: `Read articles by ${name} on the Sim blog.`, - alternates: { canonical: `https://sim.ai/blog/authors/${id}` }, + alternates: { canonical: `${SITE_URL}/blog/authors/${id}` }, openGraph: { title: `${name} — Sim Blog`, description: `Read articles by ${name} on the Sim blog.`, - url: `https://sim.ai/blog/authors/${id}`, + url: `${SITE_URL}/blog/authors/${id}`, siteName: 'Sim', type: 'profile', ...(author?.avatarUrl @@ -55,25 +56,25 @@ export default async function AuthorPage({ params }: { params: Promise<{ id: str { '@type': 'Person', name: author.name, - url: `https://sim.ai/blog/authors/${author.id}`, + url: `${SITE_URL}/blog/authors/${author.id}`, sameAs: author.url ? 
[author.url] : [], image: author.avatarUrl, worksFor: { '@type': 'Organization', name: 'Sim', - url: 'https://sim.ai', + url: SITE_URL, }, }, { '@type': 'BreadcrumbList', itemListElement: [ - { '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' }, - { '@type': 'ListItem', position: 2, name: 'Blog', item: 'https://sim.ai/blog' }, + { '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL }, + { '@type': 'ListItem', position: 2, name: 'Blog', item: `${SITE_URL}/blog` }, { '@type': 'ListItem', position: 3, name: author.name, - item: `https://sim.ai/blog/authors/${author.id}`, + item: `${SITE_URL}/blog/authors/${author.id}`, }, ], }, diff --git a/apps/sim/app/(landing)/blog/layout.tsx b/apps/sim/app/(landing)/blog/layout.tsx index 512f41a32ee..96b81a7dca5 100644 --- a/apps/sim/app/(landing)/blog/layout.tsx +++ b/apps/sim/app/(landing)/blog/layout.tsx @@ -1,4 +1,5 @@ import { getNavBlogPosts } from '@/lib/blog/registry' +import { SITE_URL } from '@/lib/core/utils/urls' import Footer from '@/app/(landing)/components/footer/footer' import Navbar from '@/app/(landing)/components/navbar/navbar' @@ -8,10 +9,10 @@ export default async function StudioLayout({ children }: { children: React.React '@context': 'https://schema.org', '@type': 'Organization', name: 'Sim', - url: 'https://sim.ai', + url: SITE_URL, description: 'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents.', - logo: 'https://sim.ai/logo/primary/small.png', + logo: `${SITE_URL}/logo/primary/small.png`, sameAs: [ 'https://x.com/simdotai', 'https://github.com/simstudioai/sim', @@ -23,7 +24,7 @@ export default async function StudioLayout({ children }: { children: React.React '@context': 'https://schema.org', '@type': 'WebSite', name: 'Sim', - url: 'https://sim.ai', + url: SITE_URL, } return ( diff --git a/apps/sim/app/(landing)/blog/page.tsx b/apps/sim/app/(landing)/blog/page.tsx index a7339cc76ad..f12f73ed253 100644 --- 
a/apps/sim/app/(landing)/blog/page.tsx +++ b/apps/sim/app/(landing)/blog/page.tsx @@ -4,6 +4,7 @@ import Link from 'next/link' import { Badge } from '@/components/emcn' import { getAllPostMeta } from '@/lib/blog/registry' import { buildCollectionPageJsonLd } from '@/lib/blog/seo' +import { SITE_URL } from '@/lib/core/utils/urls' export async function generateMetadata({ searchParams, @@ -26,7 +27,7 @@ export async function generateMetadata({ if (tag) canonicalParams.set('tag', tag) if (pageNum > 1) canonicalParams.set('page', String(pageNum)) const qs = canonicalParams.toString() - const canonical = `https://sim.ai/blog${qs ? `?${qs}` : ''}` + const canonical = `${SITE_URL}/blog${qs ? `?${qs}` : ''}` return { title, @@ -41,7 +42,7 @@ export async function generateMetadata({ type: 'website', images: [ { - url: 'https://sim.ai/logo/primary/medium.png', + url: `${SITE_URL}/logo/primary/medium.png`, width: 1200, height: 630, alt: 'Sim Blog', diff --git a/apps/sim/app/(landing)/blog/rss.xml/route.ts b/apps/sim/app/(landing)/blog/rss.xml/route.ts index fdabfce7ebc..6460e032216 100644 --- a/apps/sim/app/(landing)/blog/rss.xml/route.ts +++ b/apps/sim/app/(landing)/blog/rss.xml/route.ts @@ -1,12 +1,13 @@ import { NextResponse } from 'next/server' import { getAllPostMeta } from '@/lib/blog/registry' +import { SITE_URL } from '@/lib/core/utils/urls' export const revalidate = 3600 export async function GET() { const posts = await getAllPostMeta() const items = posts.slice(0, 50) - const site = 'https://sim.ai' + const site = SITE_URL const lastBuildDate = items.length > 0 ? 
new Date(items[0].date).toUTCString() : new Date().toUTCString() diff --git a/apps/sim/app/(landing)/blog/sitemap-images.xml/route.ts b/apps/sim/app/(landing)/blog/sitemap-images.xml/route.ts index 7fa302f299d..c40833c02c2 100644 --- a/apps/sim/app/(landing)/blog/sitemap-images.xml/route.ts +++ b/apps/sim/app/(landing)/blog/sitemap-images.xml/route.ts @@ -1,11 +1,12 @@ import { NextResponse } from 'next/server' import { getAllPostMeta } from '@/lib/blog/registry' +import { SITE_URL } from '@/lib/core/utils/urls' export const revalidate = 3600 export async function GET() { const posts = await getAllPostMeta() - const base = 'https://sim.ai' + const base = SITE_URL const xml = ` ${posts diff --git a/apps/sim/app/(landing)/blog/tags/page.tsx b/apps/sim/app/(landing)/blog/tags/page.tsx index 1b5ccceea30..b18cff5a46d 100644 --- a/apps/sim/app/(landing)/blog/tags/page.tsx +++ b/apps/sim/app/(landing)/blog/tags/page.tsx @@ -1,15 +1,16 @@ import type { Metadata } from 'next' import Link from 'next/link' import { getAllTags } from '@/lib/blog/registry' +import { SITE_URL } from '@/lib/core/utils/urls' export const metadata: Metadata = { title: 'Tags', description: 'Browse Sim blog posts by topic — AI agents, workflows, integrations, and more.', - alternates: { canonical: 'https://sim.ai/blog/tags' }, + alternates: { canonical: `${SITE_URL}/blog/tags` }, openGraph: { title: 'Blog Tags | Sim', description: 'Browse Sim blog posts by topic — AI agents, workflows, integrations, and more.', - url: 'https://sim.ai/blog/tags', + url: `${SITE_URL}/blog/tags`, siteName: 'Sim', locale: 'en_US', type: 'website', @@ -26,9 +27,9 @@ const breadcrumbJsonLd = { '@context': 'https://schema.org', '@type': 'BreadcrumbList', itemListElement: [ - { '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' }, - { '@type': 'ListItem', position: 2, name: 'Blog', item: 'https://sim.ai/blog' }, - { '@type': 'ListItem', position: 3, name: 'Tags', item: 'https://sim.ai/blog/tags' }, + { 
'@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL }, + { '@type': 'ListItem', position: 2, name: 'Blog', item: `${SITE_URL}/blog` }, + { '@type': 'ListItem', position: 3, name: 'Tags', item: `${SITE_URL}/blog/tags` }, ], } diff --git a/apps/sim/app/(landing)/components/structured-data.tsx b/apps/sim/app/(landing)/components/structured-data.tsx index b03c4fb45e9..5a55b1c1c5d 100644 --- a/apps/sim/app/(landing)/components/structured-data.tsx +++ b/apps/sim/app/(landing)/components/structured-data.tsx @@ -1,3 +1,5 @@ +import { SITE_URL } from '@/lib/core/utils/urls' + /** * JSON-LD structured data for the landing page. * @@ -23,22 +25,22 @@ export default function StructuredData() { '@graph': [ { '@type': 'Organization', - '@id': 'https://sim.ai/#organization', + '@id': `${SITE_URL}/#organization`, name: 'Sim', alternateName: 'Sim Studio', description: 'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work.', - url: 'https://sim.ai', + url: SITE_URL, logo: { '@type': 'ImageObject', - '@id': 'https://sim.ai/#logo', - url: 'https://sim.ai/logo/b%26w/text/b%26w.svg', - contentUrl: 'https://sim.ai/logo/b%26w/text/b%26w.svg', + '@id': `${SITE_URL}/#logo`, + url: `${SITE_URL}/logo/b%26w/text/b%26w.svg`, + contentUrl: `${SITE_URL}/logo/b%26w/text/b%26w.svg`, width: 49.78314, height: 24.276, caption: 'Sim Logo', }, - image: { '@id': 'https://sim.ai/#logo' }, + image: { '@id': `${SITE_URL}/#logo` }, sameAs: [ 'https://x.com/simdotai', 'https://github.com/simstudioai/sim', @@ -53,44 +55,42 @@ export default function StructuredData() { }, { '@type': 'WebSite', - '@id': 'https://sim.ai/#website', - url: 'https://sim.ai', + '@id': `${SITE_URL}/#website`, + url: SITE_URL, name: 'Sim — The AI Workspace | Build, Deploy & Manage AI Agents', description: 'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. 
Connect 1,000+ integrations and every major LLM. Join 100,000+ builders.', - publisher: { '@id': 'https://sim.ai/#organization' }, + publisher: { '@id': `${SITE_URL}/#organization` }, inLanguage: 'en-US', }, { '@type': 'WebPage', - '@id': 'https://sim.ai/#webpage', - url: 'https://sim.ai', + '@id': `${SITE_URL}/#webpage`, + url: SITE_URL, name: 'Sim — The AI Workspace | Build, Deploy & Manage AI Agents', - isPartOf: { '@id': 'https://sim.ai/#website' }, - about: { '@id': 'https://sim.ai/#software' }, + isPartOf: { '@id': `${SITE_URL}/#website` }, + about: { '@id': `${SITE_URL}/#software` }, datePublished: '2024-01-01T00:00:00+00:00', dateModified: new Date().toISOString(), description: 'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work.', - breadcrumb: { '@id': 'https://sim.ai/#breadcrumb' }, + breadcrumb: { '@id': `${SITE_URL}/#breadcrumb` }, inLanguage: 'en-US', speakable: { '@type': 'SpeakableSpecification', cssSelector: ['#hero-heading', '[id="hero"] p'], }, - potentialAction: [{ '@type': 'ReadAction', target: ['https://sim.ai'] }], + potentialAction: [{ '@type': 'ReadAction', target: [SITE_URL] }], }, { '@type': 'BreadcrumbList', - '@id': 'https://sim.ai/#breadcrumb', - itemListElement: [ - { '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' }, - ], + '@id': `${SITE_URL}/#breadcrumb`, + itemListElement: [{ '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL }], }, { '@type': 'WebApplication', - '@id': 'https://sim.ai/#software', - url: 'https://sim.ai', + '@id': `${SITE_URL}/#software`, + url: SITE_URL, name: 'Sim — The AI Workspace', description: 'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work — visually, conversationally, or with code. Trusted by over 100,000 builders. 
SOC2 compliant.', @@ -98,7 +98,7 @@ export default function StructuredData() { applicationSubCategory: 'AI Workspace', operatingSystem: 'Web', browserRequirements: 'Requires a modern browser with JavaScript enabled', - installUrl: 'https://sim.ai/signup', + installUrl: `${SITE_URL}/signup`, offers: [ { '@type': 'Offer', @@ -175,16 +175,16 @@ export default function StructuredData() { }, { '@type': 'SoftwareSourceCode', - '@id': 'https://sim.ai/#source', + '@id': `${SITE_URL}/#source`, codeRepository: 'https://github.com/simstudioai/sim', programmingLanguage: ['TypeScript', 'Python'], runtimePlatform: 'Node.js', license: 'https://opensource.org/licenses/Apache-2.0', - isPartOf: { '@id': 'https://sim.ai/#software' }, + isPartOf: { '@id': `${SITE_URL}/#software` }, }, { '@type': 'FAQPage', - '@id': 'https://sim.ai/#faq', + '@id': `${SITE_URL}/#faq`, mainEntity: [ { '@type': 'Question', diff --git a/apps/sim/app/(landing)/integrations/[slug]/page.tsx b/apps/sim/app/(landing)/integrations/[slug]/page.tsx index e93bf9c73fe..d48ea6f4054 100644 --- a/apps/sim/app/(landing)/integrations/[slug]/page.tsx +++ b/apps/sim/app/(landing)/integrations/[slug]/page.tsx @@ -2,7 +2,7 @@ import type { Metadata } from 'next' import Image from 'next/image' import Link from 'next/link' import { notFound } from 'next/navigation' -import { getBaseUrl } from '@/lib/core/utils/urls' +import { SITE_URL } from '@/lib/core/utils/urls' import { IntegrationCtaButton } from '@/app/(landing)/integrations/[slug]/components/integration-cta-button' import { IntegrationFAQ } from '@/app/(landing)/integrations/[slug]/components/integration-faq' import { TemplateCardButton } from '@/app/(landing)/integrations/[slug]/components/template-card-button' @@ -14,7 +14,7 @@ import { TEMPLATES } from '@/app/workspace/[workspaceId]/home/components/templat const allIntegrations = integrations as Integration[] const INTEGRATION_COUNT = allIntegrations.length -const baseUrl = getBaseUrl() +const baseUrl = SITE_URL /** 
Fast O(1) lookups — avoids repeated linear scans inside render loops. */ const bySlug = new Map(allIntegrations.map((i) => [i.slug, i])) diff --git a/apps/sim/app/(landing)/integrations/layout.tsx b/apps/sim/app/(landing)/integrations/layout.tsx index 23614abe122..231771091c7 100644 --- a/apps/sim/app/(landing)/integrations/layout.tsx +++ b/apps/sim/app/(landing)/integrations/layout.tsx @@ -1,11 +1,11 @@ import { getNavBlogPosts } from '@/lib/blog/registry' -import { getBaseUrl } from '@/lib/core/utils/urls' +import { SITE_URL } from '@/lib/core/utils/urls' import Footer from '@/app/(landing)/components/footer/footer' import Navbar from '@/app/(landing)/components/navbar/navbar' export default async function IntegrationsLayout({ children }: { children: React.ReactNode }) { const blogPosts = await getNavBlogPosts() - const url = getBaseUrl() + const url = SITE_URL const orgJsonLd = { '@context': 'https://schema.org', '@type': 'Organization', diff --git a/apps/sim/app/(landing)/integrations/page.tsx b/apps/sim/app/(landing)/integrations/page.tsx index 60927489eeb..3340ba7f271 100644 --- a/apps/sim/app/(landing)/integrations/page.tsx +++ b/apps/sim/app/(landing)/integrations/page.tsx @@ -1,6 +1,6 @@ import type { Metadata } from 'next' import { Badge } from '@/components/emcn' -import { getBaseUrl } from '@/lib/core/utils/urls' +import { SITE_URL } from '@/lib/core/utils/urls' import { IntegrationCard } from './components/integration-card' import { IntegrationGrid } from './components/integration-grid' import { RequestIntegrationModal } from './components/request-integration-modal' @@ -18,7 +18,7 @@ const INTEGRATION_COUNT = allIntegrations.length */ const TOP_NAMES = [...new Set(POPULAR_WORKFLOWS.flatMap((p) => [p.from, p.to]))].slice(0, 6) -const baseUrl = getBaseUrl() +const baseUrl = SITE_URL /** Curated featured integrations — high-recognition services shown as cards. 
*/ const FEATURED_SLUGS = ['slack', 'notion', 'github', 'gmail'] as const diff --git a/apps/sim/app/(landing)/layout.tsx b/apps/sim/app/(landing)/layout.tsx index bb6ee982754..3b10895f16e 100644 --- a/apps/sim/app/(landing)/layout.tsx +++ b/apps/sim/app/(landing)/layout.tsx @@ -1,9 +1,10 @@ import type { Metadata } from 'next' +import { SITE_URL } from '@/lib/core/utils/urls' import { martianMono } from '@/app/_styles/fonts/martian-mono/martian-mono' import { season } from '@/app/_styles/fonts/season/season' export const metadata: Metadata = { - metadataBase: new URL('https://sim.ai'), + metadataBase: new URL(SITE_URL), manifest: '/manifest.webmanifest', icons: { icon: [{ url: '/icon.svg', type: 'image/svg+xml', sizes: 'any' }], diff --git a/apps/sim/app/(landing)/models/[provider]/[model]/page.tsx b/apps/sim/app/(landing)/models/[provider]/[model]/page.tsx index 7334c689cbb..c8ab7d8c423 100644 --- a/apps/sim/app/(landing)/models/[provider]/[model]/page.tsx +++ b/apps/sim/app/(landing)/models/[provider]/[model]/page.tsx @@ -1,7 +1,7 @@ import type { Metadata } from 'next' import Link from 'next/link' import { notFound } from 'next/navigation' -import { getBaseUrl } from '@/lib/core/utils/urls' +import { SITE_URL } from '@/lib/core/utils/urls' import { LandingFAQ } from '@/app/(landing)/components/landing-faq' import { FeaturedModelCard, ProviderIcon } from '@/app/(landing)/models/components/model-primitives' import { @@ -18,7 +18,7 @@ import { getRelatedModels, } from '@/app/(landing)/models/utils' -const baseUrl = getBaseUrl() +const baseUrl = SITE_URL export async function generateStaticParams() { return ALL_CATALOG_MODELS.map((model) => ({ @@ -221,7 +221,7 @@ export default async function ModelPage({
Build with this model diff --git a/apps/sim/app/(landing)/models/[provider]/page.tsx b/apps/sim/app/(landing)/models/[provider]/page.tsx index 19e9fa730e3..ae2acbe2734 100644 --- a/apps/sim/app/(landing)/models/[provider]/page.tsx +++ b/apps/sim/app/(landing)/models/[provider]/page.tsx @@ -2,7 +2,7 @@ import type { Metadata } from 'next' import Link from 'next/link' import { notFound } from 'next/navigation' import { Badge } from '@/components/emcn' -import { getBaseUrl } from '@/lib/core/utils/urls' +import { SITE_URL } from '@/lib/core/utils/urls' import { LandingFAQ } from '@/app/(landing)/components/landing-faq' import { ChevronArrow, @@ -20,7 +20,7 @@ import { TOP_MODEL_PROVIDERS, } from '@/app/(landing)/models/utils' -const baseUrl = getBaseUrl() +const baseUrl = SITE_URL export async function generateStaticParams() { return MODEL_PROVIDERS_WITH_CATALOGS.map((provider) => ({ diff --git a/apps/sim/app/(landing)/models/layout.tsx b/apps/sim/app/(landing)/models/layout.tsx index f211da54610..672632f70b4 100644 --- a/apps/sim/app/(landing)/models/layout.tsx +++ b/apps/sim/app/(landing)/models/layout.tsx @@ -1,11 +1,11 @@ import { getNavBlogPosts } from '@/lib/blog/registry' -import { getBaseUrl } from '@/lib/core/utils/urls' +import { SITE_URL } from '@/lib/core/utils/urls' import Footer from '@/app/(landing)/components/footer/footer' import Navbar from '@/app/(landing)/components/navbar/navbar' export default async function ModelsLayout({ children }: { children: React.ReactNode }) { const blogPosts = await getNavBlogPosts() - const url = getBaseUrl() + const url = SITE_URL const orgJsonLd = { '@context': 'https://schema.org', '@type': 'Organization', diff --git a/apps/sim/app/(landing)/models/page.tsx b/apps/sim/app/(landing)/models/page.tsx index ed41353f74f..dd01727fde7 100644 --- a/apps/sim/app/(landing)/models/page.tsx +++ b/apps/sim/app/(landing)/models/page.tsx @@ -1,6 +1,6 @@ import type { Metadata } from 'next' import { Badge } from '@/components/emcn' 
-import { getBaseUrl } from '@/lib/core/utils/urls' +import { SITE_URL } from '@/lib/core/utils/urls' import { LandingFAQ } from '@/app/(landing)/components/landing-faq' import { ModelComparisonCharts } from '@/app/(landing)/models/components/model-comparison-charts' import { ModelDirectory } from '@/app/(landing)/models/components/model-directory' @@ -17,7 +17,7 @@ import { TOTAL_MODELS, } from '@/app/(landing)/models/utils' -const baseUrl = getBaseUrl() +const baseUrl = SITE_URL const faqItems = [ { diff --git a/apps/sim/app/(landing)/partners/page.tsx b/apps/sim/app/(landing)/partners/page.tsx index ccdda2603ee..e6d26f0d3b4 100644 --- a/apps/sim/app/(landing)/partners/page.tsx +++ b/apps/sim/app/(landing)/partners/page.tsx @@ -1,5 +1,6 @@ import type { Metadata } from 'next' import { getNavBlogPosts } from '@/lib/blog/registry' +import { SITE_URL } from '@/lib/core/utils/urls' import { martianMono } from '@/app/_styles/fonts/martian-mono/martian-mono' import { season } from '@/app/_styles/fonts/season/season' import Footer from '@/app/(landing)/components/footer/footer' @@ -9,7 +10,7 @@ export const metadata: Metadata = { title: 'Partner Program', description: "Join the Sim partner program. Build, deploy, and sell AI agent solutions powered by Sim's AI workspace. 
Earn your certification through Sim Academy.", - metadataBase: new URL('https://sim.ai'), + metadataBase: new URL(SITE_URL), openGraph: { title: 'Partner Program | Sim', description: 'Join the Sim partner program.', diff --git a/apps/sim/app/(landing)/seo.test.ts b/apps/sim/app/(landing)/seo.test.ts new file mode 100644 index 00000000000..cb7b207af05 --- /dev/null +++ b/apps/sim/app/(landing)/seo.test.ts @@ -0,0 +1,127 @@ +/** + * @vitest-environment node + */ +import fs from 'fs' +import path from 'path' +import { describe, expect, it } from 'vitest' +import { SITE_URL } from '@/lib/core/utils/urls' + +const SIM_ROOT = path.resolve(__dirname, '..', '..') +const APP_DIR = path.resolve(SIM_ROOT, 'app') +const LANDING_DIR = path.resolve(APP_DIR, '(landing)') + +/** + * All directories containing public-facing pages or SEO-relevant code. + * Non-marketing app routes (workspace, chat, form) are excluded — + * they legitimately use getBaseUrl() for dynamic, env-dependent URLs. + */ +const SEO_SCAN_DIRS = [ + LANDING_DIR, + path.resolve(APP_DIR, 'changelog'), + path.resolve(APP_DIR, 'changelog.xml'), + path.resolve(APP_DIR, 'academy'), + path.resolve(SIM_ROOT, 'lib', 'blog'), + path.resolve(SIM_ROOT, 'content', 'blog'), +] + +const SEO_SCAN_INDIVIDUAL_FILES = [ + path.resolve(APP_DIR, 'page.tsx'), + path.resolve(SIM_ROOT, 'ee', 'whitelabeling', 'metadata.ts'), +] + +function collectFiles(dir: string, exts: string[]): string[] { + const results: string[] = [] + if (!fs.existsSync(dir)) return results + + for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { + const full = path.join(dir, entry.name) + if (entry.isDirectory()) { + results.push(...collectFiles(full, exts)) + } else if (exts.some((ext) => entry.name.endsWith(ext)) && !entry.name.includes('.test.')) { + results.push(full) + } + } + return results +} + +function getAllSeoFiles(exts: string[]): string[] { + const files: string[] = [] + for (const dir of SEO_SCAN_DIRS) { + 
files.push(...collectFiles(dir, exts)) + } + for (const file of SEO_SCAN_INDIVIDUAL_FILES) { + if (fs.existsSync(file)) files.push(file) + } + return files +} + +describe('SEO canonical URLs', () => { + it('SITE_URL equals https://www.sim.ai', () => { + expect(SITE_URL).toBe('https://www.sim.ai') + }) + + it('public pages do not hardcode https://sim.ai (without www)', () => { + const files = getAllSeoFiles(['.ts', '.tsx', '.mdx']) + const violations: string[] = [] + + for (const file of files) { + const content = fs.readFileSync(file, 'utf-8') + const lines = content.split('\n') + + for (let i = 0; i < lines.length; i++) { + const line = lines[i] + const hasBareSimAi = + line.includes("'https://sim.ai'") || + line.includes("'https://sim.ai/") || + line.includes('"https://sim.ai"') || + line.includes('"https://sim.ai/') || + line.includes('`https://sim.ai/') || + line.includes('`https://sim.ai`') || + line.includes('canonical: https://sim.ai/') + + if (!hasBareSimAi) continue + + const isAllowlisted = + line.includes('https://sim.ai/careers') || line.includes('https://sim.ai/discord') + + if (isAllowlisted) continue + + const rel = path.relative(SIM_ROOT, file) + violations.push(`${rel}:${i + 1}: ${line.trim()}`) + } + } + + expect( + violations, + `Found hardcoded https://sim.ai (without www):\n${violations.join('\n')}` + ).toHaveLength(0) + }) + + it('public pages do not use getBaseUrl() for SEO metadata', () => { + const files = getAllSeoFiles(['.ts', '.tsx']) + const violations: string[] = [] + + for (const file of files) { + const content = fs.readFileSync(file, 'utf-8') + + if (!content.includes('getBaseUrl')) continue + + const hasMetadataExport = + content.includes('export const metadata') || + content.includes('export async function generateMetadata') + const usesGetBaseUrlInMetadata = + hasMetadataExport && + (content.includes('= getBaseUrl()') || content.includes('metadataBase: new URL(getBaseUrl')) + + if (usesGetBaseUrlInMetadata) { + const rel = 
path.relative(SIM_ROOT, file) + violations.push(rel) + } + } + + expect( + violations, + `Public pages should use SITE_URL for metadata, not getBaseUrl():\n${violations.join('\n')}` + ).toHaveLength(0) + }) +}) diff --git a/apps/sim/app/academy/layout.tsx b/apps/sim/app/academy/layout.tsx index 502b1d4d574..265a01e83e9 100644 --- a/apps/sim/app/academy/layout.tsx +++ b/apps/sim/app/academy/layout.tsx @@ -1,6 +1,7 @@ import type React from 'react' import type { Metadata } from 'next' import { notFound } from 'next/navigation' +import { SITE_URL } from '@/lib/core/utils/urls' // TODO: Remove notFound() call to make academy pages public once content is ready const ACADEMY_ENABLED = false @@ -12,7 +13,7 @@ export const metadata: Metadata = { }, description: 'Become a certified Sim partner — learn to build, integrate, and deploy AI workflows.', - metadataBase: new URL('https://sim.ai'), + metadataBase: new URL(SITE_URL), openGraph: { title: 'Sim Academy', description: 'Become a certified Sim partner.', diff --git a/apps/sim/app/api/auth/oauth/microsoft/files/route.ts b/apps/sim/app/api/auth/oauth/microsoft/files/route.ts index 23bd2e57e5e..d38419f3998 100644 --- a/apps/sim/app/api/auth/oauth/microsoft/files/route.ts +++ b/apps/sim/app/api/auth/oauth/microsoft/files/route.ts @@ -1,8 +1,10 @@ import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 'next/server' import { authorizeCredentialUse } from '@/lib/auth/credential-access' +import { validatePathSegment } from '@/lib/core/security/input-validation' import { generateRequestId } from '@/lib/core/utils/request' import { getCredential, refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils' +import { GRAPH_ID_PATTERN } from '@/tools/microsoft_excel/utils' export const dynamic = 'force-dynamic' @@ -19,6 +21,7 @@ export async function GET(request: NextRequest) { const { searchParams } = new URL(request.url) const credentialId = searchParams.get('credentialId') const query = 
searchParams.get('query') || '' + const driveId = searchParams.get('driveId') || undefined const workflowId = searchParams.get('workflowId') || undefined if (!credentialId) { @@ -72,8 +75,21 @@ export async function GET(request: NextRequest) { ) searchParams_new.append('$top', '50') + // When driveId is provided (SharePoint), search within that specific drive. + // Otherwise, search the user's personal OneDrive. + if (driveId) { + const driveIdValidation = validatePathSegment(driveId, { + paramName: 'driveId', + customPattern: GRAPH_ID_PATTERN, + }) + if (!driveIdValidation.isValid) { + return NextResponse.json({ error: driveIdValidation.error }, { status: 400 }) + } + } + const drivePath = driveId ? `drives/${driveId}` : 'me/drive' + const response = await fetch( - `https://graph.microsoft.com/v1.0/me/drive/root/search(q='${encodeURIComponent(searchQuery)}')?${searchParams_new.toString()}`, + `https://graph.microsoft.com/v1.0/${drivePath}/root/search(q='${encodeURIComponent(searchQuery)}')?${searchParams_new.toString()}`, { headers: { Authorization: `Bearer ${accessToken}`, diff --git a/apps/sim/app/api/copilot/chat/resources/route.ts b/apps/sim/app/api/copilot/chat/resources/route.ts index f6042138c5e..9335c86d07d 100644 --- a/apps/sim/app/api/copilot/chat/resources/route.ts +++ b/apps/sim/app/api/copilot/chat/resources/route.ts @@ -169,24 +169,24 @@ export async function DELETE(req: NextRequest) { const body = await req.json() const { chatId, resourceType, resourceId } = RemoveResourceSchema.parse(body) - const [chat] = await db - .select({ resources: copilotChats.resources }) - .from(copilotChats) + const [updated] = await db + .update(copilotChats) + .set({ + resources: sql`COALESCE(( + SELECT jsonb_agg(elem) + FROM jsonb_array_elements(${copilotChats.resources}) elem + WHERE NOT (elem->>'type' = ${resourceType} AND elem->>'id' = ${resourceId}) + ), '[]'::jsonb)`, + updatedAt: new Date(), + }) .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, 
userId))) - .limit(1) + .returning({ resources: copilotChats.resources }) - if (!chat) { + if (!updated) { return createNotFoundResponse('Chat not found or unauthorized') } - const existing = Array.isArray(chat.resources) ? (chat.resources as ChatResource[]) : [] - const key = `${resourceType}:${resourceId}` - const merged = existing.filter((r) => `${r.type}:${r.id}` !== key) - - await db - .update(copilotChats) - .set({ resources: sql`${JSON.stringify(merged)}::jsonb`, updatedAt: new Date() }) - .where(eq(copilotChats.id, chatId)) + const merged = Array.isArray(updated.resources) ? (updated.resources as ChatResource[]) : [] logger.info('Removed resource from chat', { chatId, resourceType, resourceId }) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index ebff9bb80b6..f2bc6a2754f 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -17,6 +17,7 @@ import { eq, sql } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { validateOAuthAccessToken } from '@/lib/auth/oauth-token' import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription' +import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' import { ORCHESTRATION_TIMEOUT_MS, SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { runHeadlessCopilotLifecycle } from '@/lib/copilot/request/lifecycle/headless' import { orchestrateSubagentStream } from '@/lib/copilot/request/subagent' @@ -136,14 +137,14 @@ When the user refers to a workflow by name or description ("the email one", "my ### Organization - \`rename_workflow\` — rename a workflow -- \`move_workflow\` — move a workflow into a folder (or root with null) -- \`move_folder\` — nest a folder inside another (or root with null) +- \`move_workflow\` — move a workflow into a folder (or back to root by clearing the folder id) +- \`move_folder\` — nest a folder inside another (or move it back to root 
by clearing the parent id) - \`create_folder(name, parentId)\` — create nested folder hierarchies ### Key Rules - You can test workflows immediately after building — deployment is only needed for external access (API, chat, MCP). -- All workflow-scoped copilot tools require \`workflowId\`. +- Tools that operate on a specific workflow such as \`sim_workflow\`, \`sim_test\`, \`sim_deploy\`, and workflow-scoped \`sim_info\` requests require \`workflowId\`. - If the user reports errors, route through \`sim_workflow\` and ask it to reproduce, inspect logs, and fix the issue end to end. - Variable syntax: \`\` for block outputs, \`{{ENV_VAR}}\` for env vars. ` @@ -667,10 +668,10 @@ async function handleDirectToolCall( } /** - * Build mode uses the main chat orchestrator with the 'fast' command instead of - * the subagent endpoint. In Go, 'workflow' is not a registered subagent — it's a mode - * (ModeFast) on the main chat processor that bypasses subagent orchestration and - * executes all tools directly. + * Build mode uses the main /api/mcp orchestrator instead of /api/subagent/workflow. + * The main agent still delegates workflow work to the workflow subagent inside Go; + * this helper simply uses the full headless lifecycle so build requests behave like + * the primary MCP chat flow. */ async function handleBuildToolCall( args: Record, @@ -680,6 +681,8 @@ async function handleBuildToolCall( try { const requestText = (args.request as string) || JSON.stringify(args) const workflowId = args.workflowId as string | undefined + let resolvedWorkflowName: string | undefined + let resolvedWorkspaceId: string | undefined const resolved = workflowId ? await (async () => { @@ -688,8 +691,10 @@ async function handleBuildToolCall( userId, action: 'read', }) + resolvedWorkflowName = authorization.workflow?.name || undefined + resolvedWorkspaceId = authorization.workflow?.workspaceId || undefined return authorization.allowed - ? { status: 'resolved' as const, workflowId } + ? 
{ status: 'resolved' as const, workflowId, workflowName: resolvedWorkflowName } : { status: 'not_found' as const, message: 'workflowId is required for build. Call create_workflow first.', @@ -697,6 +702,10 @@ async function handleBuildToolCall( })() : await resolveWorkflowIdForUser(userId) + if (resolved.status === 'resolved') { + resolvedWorkflowName ||= resolved.workflowName + } + if (!resolved || resolved.status !== 'resolved') { return { content: [ @@ -719,10 +728,29 @@ async function handleBuildToolCall( } const chatId = generateId() + const executionContext = await prepareExecutionContext(userId, resolved.workflowId, chatId, { + workspaceId: resolvedWorkspaceId, + }) + resolvedWorkspaceId = executionContext.workspaceId + let workspaceContext: string | undefined + if (resolvedWorkspaceId) { + try { + workspaceContext = await generateWorkspaceContext(resolvedWorkspaceId, userId) + } catch (error) { + logger.warn('Failed to generate workspace context for build tool call', { + workflowId: resolved.workflowId, + workspaceId: resolvedWorkspaceId, + error: error instanceof Error ? error.message : String(error), + }) + } + } const requestPayload = { message: requestText, workflowId: resolved.workflowId, + ...(resolvedWorkflowName ? { workflowName: resolvedWorkflowName } : {}), + ...(resolvedWorkspaceId ? { workspaceId: resolvedWorkspaceId } : {}), + ...(workspaceContext ? 
{ workspaceContext } : {}), userId, model: DEFAULT_COPILOT_MODEL, mode: 'agent', @@ -734,8 +762,10 @@ async function handleBuildToolCall( const result = await runHeadlessCopilotLifecycle(requestPayload, { userId, workflowId: resolved.workflowId, + workspaceId: resolvedWorkspaceId, chatId, goRoute: '/api/mcp', + executionContext, autoExecuteTools: true, timeout: ORCHESTRATION_TIMEOUT_MS, interactive: false, diff --git a/apps/sim/app/api/tools/microsoft_excel/drives/route.ts b/apps/sim/app/api/tools/microsoft_excel/drives/route.ts new file mode 100644 index 00000000000..d0dc8ef7c99 --- /dev/null +++ b/apps/sim/app/api/tools/microsoft_excel/drives/route.ts @@ -0,0 +1,135 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { authorizeCredentialUse } from '@/lib/auth/credential-access' +import { validatePathSegment, validateSharePointSiteId } from '@/lib/core/security/input-validation' +import { generateRequestId } from '@/lib/core/utils/request' +import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils' +import { GRAPH_ID_PATTERN } from '@/tools/microsoft_excel/utils' + +export const dynamic = 'force-dynamic' + +const logger = createLogger('MicrosoftExcelDrivesAPI') + +interface GraphDrive { + id: string + name: string + driveType: string + webUrl?: string +} + +/** + * List document libraries (drives) for a SharePoint site. + * Used by the microsoft.excel.drives selector to let users pick + * which drive contains their Excel file. 
+ */ +export async function POST(request: NextRequest) { + const requestId = generateRequestId() + + try { + const body = await request.json() + const { credential, workflowId, siteId, driveId } = body + + if (!credential) { + logger.warn(`[${requestId}] Missing credential in request`) + return NextResponse.json({ error: 'Credential is required' }, { status: 400 }) + } + + if (!siteId) { + logger.warn(`[${requestId}] Missing siteId in request`) + return NextResponse.json({ error: 'Site ID is required' }, { status: 400 }) + } + + const siteIdValidation = validateSharePointSiteId(siteId, 'siteId') + if (!siteIdValidation.isValid) { + logger.warn(`[${requestId}] Invalid siteId format`) + return NextResponse.json({ error: siteIdValidation.error }, { status: 400 }) + } + + const authz = await authorizeCredentialUse(request, { + credentialId: credential, + workflowId, + }) + if (!authz.ok || !authz.credentialOwnerUserId) { + return NextResponse.json({ error: authz.error || 'Unauthorized' }, { status: 403 }) + } + + const accessToken = await refreshAccessTokenIfNeeded( + credential, + authz.credentialOwnerUserId, + requestId + ) + if (!accessToken) { + logger.warn(`[${requestId}] Failed to obtain valid access token`) + return NextResponse.json( + { error: 'Failed to obtain valid access token', authRequired: true }, + { status: 401 } + ) + } + + // Single-drive lookup when driveId is provided (used by fetchById) + if (driveId) { + const driveIdValidation = validatePathSegment(driveId, { + paramName: 'driveId', + customPattern: GRAPH_ID_PATTERN, + }) + if (!driveIdValidation.isValid) { + return NextResponse.json({ error: driveIdValidation.error }, { status: 400 }) + } + + const url = `https://graph.microsoft.com/v1.0/sites/${siteId}/drives/${driveId}?$select=id,name,driveType,webUrl` + const response = await fetch(url, { + headers: { Authorization: `Bearer ${accessToken}` }, + }) + + if (!response.ok) { + const errorData = await response + .json() + .catch(() => ({ error: { 
message: 'Unknown error' } })) + return NextResponse.json( + { error: errorData.error?.message || 'Failed to fetch drive' }, + { status: response.status } + ) + } + + const data: GraphDrive = await response.json() + return NextResponse.json( + { drive: { id: data.id, name: data.name, driveType: data.driveType } }, + { status: 200 } + ) + } + + // List all drives for the site + const url = `https://graph.microsoft.com/v1.0/sites/${siteId}/drives?$select=id,name,driveType,webUrl` + + const response = await fetch(url, { + headers: { + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => ({ error: { message: 'Unknown error' } })) + logger.error(`[${requestId}] Microsoft Graph API error fetching drives`, { + status: response.status, + error: errorData.error?.message, + }) + return NextResponse.json( + { error: errorData.error?.message || 'Failed to fetch drives' }, + { status: response.status } + ) + } + + const data = await response.json() + const drives = (data.value || []).map((drive: GraphDrive) => ({ + id: drive.id, + name: drive.name, + driveType: drive.driveType, + })) + + logger.info(`[${requestId}] Successfully fetched ${drives.length} drives for site ${siteId}`) + return NextResponse.json({ drives }, { status: 200 }) + } catch (error) { + logger.error(`[${requestId}] Error fetching drives`, error) + return NextResponse.json({ error: 'Internal server error' }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/tools/microsoft_excel/sheets/route.ts b/apps/sim/app/api/tools/microsoft_excel/sheets/route.ts index d4f8035149e..367e04fc413 100644 --- a/apps/sim/app/api/tools/microsoft_excel/sheets/route.ts +++ b/apps/sim/app/api/tools/microsoft_excel/sheets/route.ts @@ -3,6 +3,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { authorizeCredentialUse } from '@/lib/auth/credential-access' import { generateRequestId } from '@/lib/core/utils/request' import { 
refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils' +import { getItemBasePath } from '@/tools/microsoft_excel/utils' export const dynamic = 'force-dynamic' @@ -30,6 +31,7 @@ export async function GET(request: NextRequest) { const { searchParams } = new URL(request.url) const credentialId = searchParams.get('credentialId') const spreadsheetId = searchParams.get('spreadsheetId') + const driveId = searchParams.get('driveId') || undefined const workflowId = searchParams.get('workflowId') || undefined if (!credentialId) { @@ -61,17 +63,23 @@ export async function GET(request: NextRequest) { `[${requestId}] Fetching worksheets from Microsoft Graph API for workbook ${spreadsheetId}` ) - // Fetch worksheets from Microsoft Graph API - const worksheetsResponse = await fetch( - `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}/workbook/worksheets`, - { - method: 'GET', - headers: { - Authorization: `Bearer ${accessToken}`, - 'Content-Type': 'application/json', - }, - } - ) + let basePath: string + try { + basePath = getItemBasePath(spreadsheetId, driveId) + } catch (error) { + return NextResponse.json( + { error: error instanceof Error ? 
error.message : 'Invalid parameters' }, + { status: 400 } + ) + } + + const worksheetsResponse = await fetch(`${basePath}/workbook/worksheets`, { + method: 'GET', + headers: { + Authorization: `Bearer ${accessToken}`, + 'Content-Type': 'application/json', + }, + }) if (!worksheetsResponse.ok) { const errorData = await worksheetsResponse diff --git a/apps/sim/app/api/workflows/[id]/executions/[executionId]/cancel/route.test.ts b/apps/sim/app/api/workflows/[id]/executions/[executionId]/cancel/route.test.ts index 73b16fdea1c..295b17e4e7e 100644 --- a/apps/sim/app/api/workflows/[id]/executions/[executionId]/cancel/route.test.ts +++ b/apps/sim/app/api/workflows/[id]/executions/[executionId]/cancel/route.test.ts @@ -2,6 +2,7 @@ * @vitest-environment node */ +import { databaseMock } from '@sim/testing' import { NextRequest } from 'next/server' import { beforeEach, describe, expect, it, vi } from 'vitest' @@ -203,4 +204,73 @@ describe('POST /api/workflows/[id]/executions/[executionId]/cancel', () => { expect(response.status).toBe(403) }) + + it('updates execution log status in DB when durably recorded', async () => { + const mockWhere = vi.fn().mockResolvedValue(undefined) + const mockSet = vi.fn(() => ({ where: mockWhere })) + databaseMock.db.update.mockReturnValueOnce({ set: mockSet }) + mockMarkExecutionCancelled.mockResolvedValue({ + durablyRecorded: true, + reason: 'recorded', + }) + + await POST(makeRequest(), makeParams()) + + expect(databaseMock.db.update).toHaveBeenCalled() + expect(mockSet).toHaveBeenCalledWith({ + status: 'cancelled', + endedAt: expect.any(Date), + }) + }) + + it('updates execution log status in DB when locally aborted', async () => { + const mockWhere = vi.fn().mockResolvedValue(undefined) + const mockSet = vi.fn(() => ({ where: mockWhere })) + databaseMock.db.update.mockReturnValueOnce({ set: mockSet }) + mockMarkExecutionCancelled.mockResolvedValue({ + durablyRecorded: false, + reason: 'redis_unavailable', + }) + 
mockAbortManualExecution.mockReturnValue(true) + + await POST(makeRequest(), makeParams()) + + expect(databaseMock.db.update).toHaveBeenCalled() + expect(mockSet).toHaveBeenCalledWith({ + status: 'cancelled', + endedAt: expect.any(Date), + }) + }) + + it('does not update execution log status in DB when only paused execution was cancelled', async () => { + mockMarkExecutionCancelled.mockResolvedValue({ + durablyRecorded: false, + reason: 'redis_unavailable', + }) + mockCancelPausedExecution.mockResolvedValue(true) + + await POST(makeRequest(), makeParams()) + + expect(databaseMock.db.update).not.toHaveBeenCalled() + }) + + it('returns success even if direct DB update fails', async () => { + mockMarkExecutionCancelled.mockResolvedValue({ + durablyRecorded: true, + reason: 'recorded', + }) + databaseMock.db.update.mockReturnValueOnce({ + set: vi.fn(() => ({ + where: vi.fn(() => { + throw new Error('DB connection failed') + }), + })), + }) + + const response = await POST(makeRequest(), makeParams()) + + expect(response.status).toBe(200) + const data = await response.json() + expect(data.success).toBe(true) + }) }) diff --git a/apps/sim/app/api/workflows/[id]/executions/[executionId]/cancel/route.ts b/apps/sim/app/api/workflows/[id]/executions/[executionId]/cancel/route.ts index ec65f693501..889cc353dd5 100644 --- a/apps/sim/app/api/workflows/[id]/executions/[executionId]/cancel/route.ts +++ b/apps/sim/app/api/workflows/[id]/executions/[executionId]/cancel/route.ts @@ -1,4 +1,7 @@ +import { db } from '@sim/db' +import { workflowExecutionLogs } from '@sim/db/schema' import { createLogger } from '@sim/logger' +import { and, eq } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { checkHybridAuth } from '@/lib/auth/hybrid' import { markExecutionCancelled } from '@/lib/execution/cancellation' @@ -83,6 +86,25 @@ export async function POST( }) } + if ((cancellation.durablyRecorded || locallyAborted) && !pausedCancelled) { + try { + await 
db + .update(workflowExecutionLogs) + .set({ status: 'cancelled', endedAt: new Date() }) + .where( + and( + eq(workflowExecutionLogs.executionId, executionId), + eq(workflowExecutionLogs.status, 'running') + ) + ) + } catch (dbError) { + logger.warn('Failed to update execution log status directly', { + executionId, + error: dbError, + }) + } + } + const success = cancellation.durablyRecorded || locallyAborted || pausedCancelled if (success) { diff --git a/apps/sim/app/api/workflows/[id]/executions/[executionId]/stream/route.ts b/apps/sim/app/api/workflows/[id]/executions/[executionId]/stream/route.ts index ad2f94722d1..22b60f5aae7 100644 --- a/apps/sim/app/api/workflows/[id]/executions/[executionId]/stream/route.ts +++ b/apps/sim/app/api/workflows/[id]/executions/[executionId]/stream/route.ts @@ -48,14 +48,11 @@ export async function GET( const meta = await getExecutionMeta(executionId) if (!meta) { - return NextResponse.json({ error: 'Execution buffer not found or expired' }, { status: 404 }) + return NextResponse.json({ error: 'Run buffer not found or expired' }, { status: 404 }) } if (meta.workflowId && meta.workflowId !== workflowId) { - return NextResponse.json( - { error: 'Execution does not belong to this workflow' }, - { status: 403 } - ) + return NextResponse.json({ error: 'Run does not belong to this workflow' }, { status: 403 }) } const fromParam = req.nextUrl.searchParams.get('from') diff --git a/apps/sim/app/api/workflows/[id]/log/route.ts b/apps/sim/app/api/workflows/[id]/log/route.ts index dc50fa6bd4f..dead4bf36db 100644 --- a/apps/sim/app/api/workflows/[id]/log/route.ts +++ b/apps/sim/app/api/workflows/[id]/log/route.ts @@ -95,7 +95,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{ const { traceSpans, totalDuration } = buildTraceSpans(resultWithOutput as ExecutionResult) if (result.success === false) { - const message = result.error || 'Workflow execution failed' + const message = result.error || 'Workflow run 
failed' await loggingSession.safeCompleteWithError({ endedAt: new Date().toISOString(), totalDurationMs: totalDuration || result.metadata?.duration || 0, @@ -112,7 +112,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{ } return createSuccessResponse({ - message: 'Execution logs persisted successfully', + message: 'Run logs persisted successfully', }) } diff --git a/apps/sim/app/changelog.xml/route.ts b/apps/sim/app/changelog.xml/route.ts index 5e0752056d9..9aee139447d 100644 --- a/apps/sim/app/changelog.xml/route.ts +++ b/apps/sim/app/changelog.xml/route.ts @@ -1,4 +1,5 @@ import { NextResponse } from 'next/server' +import { SITE_URL } from '@/lib/core/utils/urls' export const dynamic = 'force-static' export const revalidate = 3600 @@ -48,7 +49,7 @@ export async function GET() { Sim Changelog - https://sim.ai/changelog + ${SITE_URL}/changelog Latest changes, fixes and updates in Sim. en-us ${items} diff --git a/apps/sim/app/changelog/page.tsx b/apps/sim/app/changelog/page.tsx index c94b650667e..7b7a5a2a531 100644 --- a/apps/sim/app/changelog/page.tsx +++ b/apps/sim/app/changelog/page.tsx @@ -1,9 +1,11 @@ import type { Metadata } from 'next' +import { SITE_URL } from '@/lib/core/utils/urls' import ChangelogContent from '@/app/changelog/components/changelog-content' export const metadata: Metadata = { title: 'Changelog', description: 'Stay up-to-date with the latest features, improvements, and bug fixes in Sim.', + alternates: { canonical: `${SITE_URL}/changelog` }, openGraph: { title: 'Changelog', description: 'Stay up-to-date with the latest features, improvements, and bug fixes in Sim.', diff --git a/apps/sim/app/chat/components/message/components/markdown-renderer.tsx b/apps/sim/app/chat/components/message/components/markdown-renderer.tsx index 12254f18dd5..37059ef523a 100644 --- a/apps/sim/app/chat/components/message/components/markdown-renderer.tsx +++ b/apps/sim/app/chat/components/message/components/markdown-renderer.tsx @@ 
-1,10 +1,10 @@ -import React, { type HTMLAttributes, memo, type ReactNode, useMemo } from 'react' +import React, { type HTMLAttributes, memo, type ReactNode } from 'react' import { Streamdown } from 'streamdown' import 'streamdown/styles.css' import { CopyCodeButton, Tooltip } from '@/components/emcn' import { extractTextContent } from '@/lib/core/utils/react-node-text' -export function LinkWithPreview({ href, children }: { href: string; children: React.ReactNode }) { +function LinkWithPreview({ href, children }: { href: string; children: React.ReactNode }) { return ( @@ -24,175 +24,151 @@ export function LinkWithPreview({ href, children }: { href: string; children: Re ) } -function createCustomComponents(LinkComponent: typeof LinkWithPreview) { - return { - p: ({ children }: React.HTMLAttributes) => ( -

- {children} -

- ), - - h1: ({ children }: React.HTMLAttributes) => ( -

- {children} -

- ), - h2: ({ children }: React.HTMLAttributes) => ( -

- {children} -

- ), - h3: ({ children }: React.HTMLAttributes) => ( -

- {children} -

- ), - h4: ({ children }: React.HTMLAttributes) => ( -

- {children} -

- ), - - ul: ({ children }: React.HTMLAttributes) => ( -
    - {children} -
- ), - ol: ({ children }: React.HTMLAttributes) => ( -
    - {children} -
- ), - li: ({ children }: React.LiHTMLAttributes) => ( -
  • - {children} -
  • - ), - - pre: ({ children }: HTMLAttributes) => { - let codeProps: HTMLAttributes = {} - let codeContent: ReactNode = children - - if ( - React.isValidElement<{ className?: string; children?: ReactNode }>(children) && - children.type === 'code' - ) { - const childElement = children as React.ReactElement<{ - className?: string - children?: ReactNode - }> - codeProps = { className: childElement.props.className } - codeContent = childElement.props.children - } +const COMPONENTS = { + p: ({ children }: React.HTMLAttributes) => ( +

    + {children} +

    + ), + + h1: ({ children }: React.HTMLAttributes) => ( +

    + {children} +

    + ), + h2: ({ children }: React.HTMLAttributes) => ( +

    + {children} +

    + ), + h3: ({ children }: React.HTMLAttributes) => ( +

    + {children} +

    + ), + h4: ({ children }: React.HTMLAttributes) => ( +

    + {children} +

    + ), + + ul: ({ children }: React.HTMLAttributes) => ( +
      + {children} +
    + ), + ol: ({ children }: React.HTMLAttributes) => ( +
      + {children} +
    + ), + li: ({ children }: React.LiHTMLAttributes) => ( +
  • + {children} +
  • + ), + + pre: ({ children }: HTMLAttributes) => { + let codeProps: HTMLAttributes = {} + let codeContent: ReactNode = children + + if ( + React.isValidElement<{ className?: string; children?: ReactNode }>(children) && + children.type === 'code' + ) { + const childElement = children as React.ReactElement<{ + className?: string + children?: ReactNode + }> + codeProps = { className: childElement.props.className } + codeContent = childElement.props.children + } - return ( -
    -
    - - {codeProps.className?.replace('language-', '') || 'code'} - - -
    -
    -            {codeContent}
    -          
    + return ( +
    +
    + + {codeProps.className?.replace('language-', '') || 'code'} + +
    - ) - }, - - inlineCode: ({ children }: { children?: React.ReactNode }) => ( - - {children} - - ), - - blockquote: ({ children }: React.HTMLAttributes) => ( -
    - {children} -
    - ), - - hr: () =>
    , - - a: ({ href, children, ...props }: React.AnchorHTMLAttributes) => ( - - {children} - - ), - - table: ({ children }: React.TableHTMLAttributes) => ( -
    - - {children} -
    +
    +          {codeContent}
    +        
    - ), - thead: ({ children }: React.HTMLAttributes) => ( - {children} - ), - tbody: ({ children }: React.HTMLAttributes) => ( - - {children} - - ), - tr: ({ children }: React.HTMLAttributes) => ( - - {children} - - ), - th: ({ children }: React.ThHTMLAttributes) => ( - + ) + }, + + inlineCode: ({ children }: { children?: React.ReactNode }) => ( + + {children} + + ), + + blockquote: ({ children }: React.HTMLAttributes) => ( +
    + {children} +
    + ), + + hr: () =>
    , + + a: ({ href, children, ...props }: React.AnchorHTMLAttributes) => ( + + {children} + + ), + + table: ({ children }: React.TableHTMLAttributes) => ( +
    + {children} - - ), - td: ({ children }: React.TdHTMLAttributes) => ( - - ), - - img: ({ src, alt, ...props }: React.ImgHTMLAttributes) => ( - {alt - ), - } +
    - {children} -
    +
    + ), + thead: ({ children }: React.HTMLAttributes) => ( + {children} + ), + tbody: ({ children }: React.HTMLAttributes) => ( + + {children} + + ), + tr: ({ children }: React.HTMLAttributes) => ( + + {children} + + ), + th: ({ children }: React.ThHTMLAttributes) => ( + + {children} + + ), + td: ({ children }: React.TdHTMLAttributes) => ( + + {children} + + ), + + img: ({ src, alt, ...props }: React.ImgHTMLAttributes) => ( + {alt + ), } -const DEFAULT_COMPONENTS = createCustomComponents(LinkWithPreview) - -const MarkdownRenderer = memo(function MarkdownRenderer({ - content, - customLinkComponent, -}: { - content: string - customLinkComponent?: typeof LinkWithPreview -}) { - const components = useMemo(() => { - if (!customLinkComponent) { - return DEFAULT_COMPONENTS - } - return createCustomComponents(customLinkComponent) - }, [customLinkComponent]) - - const processedContent = content.trim() - +const MarkdownRenderer = memo(function MarkdownRenderer({ content }: { content: string }) { return (
    - - {processedContent} + + {content.trim()}
    ) diff --git a/apps/sim/app/chat/components/message/message.tsx b/apps/sim/app/chat/components/message/message.tsx index 9d02cbbcb29..f803e82c771 100644 --- a/apps/sim/app/chat/components/message/message.tsx +++ b/apps/sim/app/chat/components/message/message.tsx @@ -8,7 +8,6 @@ import { ChatFileDownloadAll, } from '@/app/chat/components/message/components/file-download' import MarkdownRenderer from '@/app/chat/components/message/components/markdown-renderer' -import { useThrottledValue } from '@/hooks/use-throttled-value' export interface ChatAttachment { id: string @@ -39,11 +38,6 @@ export interface ChatMessage { files?: ChatFile[] } -function EnhancedMarkdownRenderer({ content }: { content: string }) { - const throttled = useThrottledValue(content) - return -} - export const ClientChatMessage = memo( function ClientChatMessage({ message }: { message: ChatMessage }) { const [isCopied, setIsCopied] = useState(false) @@ -188,7 +182,7 @@ export const ClientChatMessage = memo( {JSON.stringify(cleanTextContent, null, 2)} ) : ( - + )}
    diff --git a/apps/sim/app/chat/constants.ts b/apps/sim/app/chat/constants.ts index babbdd00da4..52e515ca9ec 100644 --- a/apps/sim/app/chat/constants.ts +++ b/apps/sim/app/chat/constants.ts @@ -6,7 +6,7 @@ export const CHAT_ERROR_MESSAGES = { AUTH_REQUIRED_EMAIL: 'Please provide your email to access this chat.', CHAT_UNAVAILABLE: 'This chat is currently unavailable. Please try again later.', NO_CHAT_TRIGGER: - 'No Chat trigger configured for this workflow. Add a Chat Trigger block to enable chat execution.', + 'No Chat trigger configured for this workflow. Add a Chat Trigger block to enable chat.', USAGE_LIMIT_EXCEEDED: 'Usage limit exceeded. Please upgrade your plan to continue using chat.', } as const diff --git a/apps/sim/app/llms.txt/route.ts b/apps/sim/app/llms.txt/route.ts index 0e6f7c31873..387f73d4962 100644 --- a/apps/sim/app/llms.txt/route.ts +++ b/apps/sim/app/llms.txt/route.ts @@ -25,10 +25,10 @@ Sim lets teams create agents visually with the workflow builder, conversationall ## Key Concepts -- **Workspace**: The AI workspace — container for agents, workflows, data sources, and executions +- **Workspace**: The AI workspace — container for agents, workflows, data sources, and runs - **Workflow**: Visual builder — directed graph of blocks defining agent logic - **Block**: Individual step such as an LLM call, tool call, HTTP request, or code execution -- **Trigger**: Event or schedule that initiates workflow execution +- **Trigger**: Event or schedule that initiates a workflow run - **Execution**: A single run of a workflow with logs and outputs - **Knowledge Base**: Document store used for retrieval-augmented generation @@ -41,7 +41,7 @@ Sim lets teams create agents visually with the workflow builder, conversationall - Knowledge bases and retrieval-augmented generation - Table creation and management - Document creation and processing -- Scheduled and webhook-triggered executions +- Scheduled and webhook-triggered runs ## Use Cases diff --git 
a/apps/sim/app/page.tsx b/apps/sim/app/page.tsx index f746d2b3da6..c12a4a75e3d 100644 --- a/apps/sim/app/page.tsx +++ b/apps/sim/app/page.tsx @@ -1,13 +1,11 @@ import type { Metadata } from 'next' -import { getBaseUrl } from '@/lib/core/utils/urls' +import { SITE_URL } from '@/lib/core/utils/urls' import Landing from '@/app/(landing)/landing' export const revalidate = 3600 -const baseUrl = getBaseUrl() - export const metadata: Metadata = { - metadataBase: new URL(baseUrl), + metadataBase: new URL(SITE_URL), title: { absolute: 'Sim — The AI Workspace | Build, Deploy & Manage AI Agents', }, @@ -28,7 +26,7 @@ export const metadata: Metadata = { description: 'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work — visually, conversationally, or with code.', type: 'website', - url: baseUrl, + url: SITE_URL, siteName: 'Sim', locale: 'en_US', images: [ @@ -54,10 +52,10 @@ export const metadata: Metadata = { }, }, alternates: { - canonical: baseUrl, + canonical: SITE_URL, languages: { - 'en-US': baseUrl, - 'x-default': baseUrl, + 'en-US': SITE_URL, + 'x-default': SITE_URL, }, }, robots: { diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/chat-content/chat-content.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/chat-content/chat-content.tsx index 46091cd9ce3..2c6fa99d5e2 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/chat-content/chat-content.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/chat-content/chat-content.tsx @@ -18,7 +18,6 @@ import { SpecialTags, } from '@/app/workspace/[workspaceId]/home/components/message-content/components/special-tags' import type { MothershipResource } from '@/app/workspace/[workspaceId]/home/types' -import { useStreamingText } from 
'@/hooks/use-streaming-text' const LANG_ALIASES: Record = { js: 'javascript', @@ -236,7 +235,6 @@ interface ChatContentProps { isStreaming?: boolean onOptionSelect?: (id: string) => void onWorkspaceResourceSelect?: (resource: MothershipResource) => void - smoothStreaming?: boolean } export function ChatContent({ @@ -244,20 +242,7 @@ export function ChatContent({ isStreaming = false, onOptionSelect, onWorkspaceResourceSelect, - smoothStreaming = true, }: ChatContentProps) { - const hydratedStreamingRef = useRef(isStreaming && content.trim().length > 0) - const previousIsStreamingRef = useRef(isStreaming) - - useEffect(() => { - if (!previousIsStreamingRef.current && isStreaming && content.trim().length > 0) { - hydratedStreamingRef.current = true - } else if (!isStreaming) { - hydratedStreamingRef.current = false - } - previousIsStreamingRef.current = isStreaming - }, [content, isStreaming]) - const onWorkspaceResourceSelectRef = useRef(onWorkspaceResourceSelect) onWorkspaceResourceSelectRef.current = onWorkspaceResourceSelect @@ -270,9 +255,7 @@ export function ChatContent({ return () => window.removeEventListener('wsres-click', handler) }, []) - const rendered = useStreamingText(content, isStreaming && smoothStreaming) - - const parsed = useMemo(() => parseSpecialTags(rendered, isStreaming), [rendered, isStreaming]) + const parsed = useMemo(() => parseSpecialTags(content, isStreaming), [content, isStreaming]) const hasSpecialContent = parsed.hasPendingTag || parsed.segments.some((s) => s.type !== 'text') if (hasSpecialContent) { @@ -322,7 +305,10 @@ export function ChatContent({ key={`inline-${i}`} className={cn(PROSE_CLASSES, '[&>:first-child]:mt-0 [&>:last-child]:mb-0')} > - + {group.markdown}
    @@ -343,13 +329,8 @@ export function ChatContent({ return (
    :first-child]:mt-0 [&>:last-child]:mb-0')}> - - {rendered} + + {content}
    ) diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/special-tags/special-tags.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/special-tags/special-tags.tsx index d5261300461..0a58d8c2b34 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/special-tags/special-tags.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/special-tags/special-tags.tsx @@ -415,7 +415,7 @@ function OptionsDisplay({ data, onSelect }: OptionsDisplayProps) { if (entries.length === 0) return null return ( -
    +
    {disabled ? (
    ) @@ -139,8 +139,8 @@ export function ExecutionSnapshot({ Logged State Not Found
    - This log was migrated from the old logging system. The workflow state at execution time - is not available. + This log was migrated from the old logging system. The workflow state at the time of + this run is not available.
    Note: {workflowState._note} @@ -191,7 +191,7 @@ export function ExecutionSnapshot({ > - Copy Execution ID + Copy Run ID , diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/log-details/log-details.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/log-details/log-details.tsx index 3c8f4e499d1..994b4d7daf0 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/log-details/log-details.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/log-details/log-details.tsx @@ -448,11 +448,11 @@ export const LogDetails = memo(function LogDetails({
    - {/* Execution ID */} + {/* Run ID */} {log.executionId && (
    - Execution ID + Run ID {log.executionId} @@ -576,7 +576,7 @@ export const LogDetails = memo(function LogDetails({
    - Base Execution: + Base Run: {formatCost(BASE_EXECUTION_CHARGE)} @@ -643,8 +643,8 @@ export const LogDetails = memo(function LogDetails({

    - Total cost includes a base execution charge of{' '} - {formatCost(BASE_EXECUTION_CHARGE)} plus any model and tool usage costs. + Total cost includes a base run charge of {formatCost(BASE_EXECUTION_CHARGE)}{' '} + plus any model and tool usage costs.

    diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx index 2daf13aca1f..a9dba9f471d 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx @@ -77,14 +77,14 @@ export const LogRowContextMenu = memo(function LogRowContextMenu({ <> - Cancel Execution + Cancel Run )} - Copy Execution ID + Copy Run ID diff --git a/apps/sim/app/workspace/[workspaceId]/settings/components/subscription/plan-configs.ts b/apps/sim/app/workspace/[workspaceId]/settings/components/subscription/plan-configs.ts index 2d3f51580ef..054a6de576d 100644 --- a/apps/sim/app/workspace/[workspaceId]/settings/components/subscription/plan-configs.ts +++ b/apps/sim/app/workspace/[workspaceId]/settings/components/subscription/plan-configs.ts @@ -15,7 +15,7 @@ import type { PlanFeature } from '@/app/workspace/[workspaceId]/settings/compone export const PRO_PLAN_FEATURES: PlanFeature[] = [ { icon: Zap, text: '150 runs/min (sync)' }, { icon: Clock, text: '1,000 runs/min (async)' }, - { icon: Timer, text: '50 min sync execution limit' }, + { icon: Timer, text: '50 min sync run limit' }, { icon: HardDrive, text: '50GB file storage' }, { icon: Table2, text: '25 tables · 5,000 rows each' }, ] @@ -23,7 +23,7 @@ export const PRO_PLAN_FEATURES: PlanFeature[] = [ export const MAX_PLAN_FEATURES: PlanFeature[] = [ { icon: Zap, text: '300 runs/min (sync)' }, { icon: Clock, text: '2,500 runs/min (async)' }, - { icon: Timer, text: '50 min sync execution limit' }, + { icon: Timer, text: '50 min sync run limit' }, { icon: HardDrive, text: '500GB file storage' }, { icon: Table2, text: '25 tables · 5,000 rows each' }, ] diff --git 
a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts index f323d4c3871..e8e1fcea9aa 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts @@ -449,7 +449,7 @@ export function addExecutionErrorConsoleEntry( const isPreExecutionError = params.isPreExecutionError ?? false if (!isPreExecutionError && hasBlockError) return - const errorMessage = params.error || 'Execution failed' + const errorMessage = params.error || 'Run failed' const isTimeout = errorMessage.toLowerCase().includes('timed out') const timing = buildExecutionTiming(params.durationMs) @@ -469,7 +469,7 @@ export function addExecutionErrorConsoleEntry( ? 'Workflow Validation' : isTimeout ? 'Timeout Error' - : 'Execution Error', + : 'Run Error', blockType: isPreExecutionError ? 'validation' : 'error', }) } @@ -514,7 +514,7 @@ export function addHttpErrorConsoleEntry( workflowId: params.workflowId, blockId: isValidationError ? 'validation' : 'execution-error', executionId: params.executionId, - blockName: isValidationError ? 'Workflow Validation' : 'Execution Error', + blockName: isValidationError ? 'Workflow Validation' : 'Run Error', blockType: isValidationError ? 
'validation' : 'error', }) } @@ -537,7 +537,7 @@ export function addCancelledConsoleEntry( input: {}, output: {}, success: false, - error: 'Execution was cancelled', + error: 'Run was cancelled', durationMs: timing.durationMs, startedAt: timing.startedAt, executionOrder: Number.MAX_SAFE_INTEGER, @@ -545,7 +545,7 @@ export function addCancelledConsoleEntry( workflowId: params.workflowId, blockId: 'cancelled', executionId: params.executionId, - blockName: 'Execution Cancelled', + blockName: 'Run Cancelled', blockType: 'cancelled', }) } @@ -652,7 +652,7 @@ export async function executeWorkflowWithFullLogging( if (!response.ok) { const error = await response.json() - const errorMessage = error.error || 'Workflow execution failed' + const errorMessage = error.error || 'Workflow run failed' addHttpErrorConsoleEntry(addConsole, { workflowId: wfId, executionId, @@ -721,14 +721,14 @@ export async function executeWorkflowWithFullLogging( executionResult = { success: false, output: {}, - error: 'Execution was cancelled', + error: 'Run was cancelled', logs: accumulatedBlockLogs, } }, onExecutionError: (data) => { setCurrentExecutionId(wfId, null) - const errorMessage = data.error || 'Execution failed' + const errorMessage = data.error || 'Run failed' executionResult = { success: false, output: {}, diff --git a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/workspace-header/workspace-header.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/workspace-header/workspace-header.tsx index ea722bb2ce9..4d48a62bff0 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/workspace-header/workspace-header.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/workspace-header/workspace-header.tsx @@ -2,7 +2,7 @@ import { useEffect, useRef, useState } from 'react' import { createLogger } from '@sim/logger' -import { MoreHorizontal } from 'lucide-react' +import { MoreHorizontal, Search } from 
'lucide-react' import { Button, ChevronDown, @@ -11,6 +11,7 @@ import { DropdownMenuGroup, DropdownMenuSeparator, DropdownMenuTrigger, + Input, Modal, ModalBody, ModalContent, @@ -34,6 +35,9 @@ import { useSettingsNavigation } from '@/hooks/use-settings-navigation' const logger = createLogger('WorkspaceHeader') +/** Minimum workspace count before the search input and keyboard navigation are shown. */ +const WORKSPACE_SEARCH_THRESHOLD = 3 + interface WorkspaceHeaderProps { /** The active workspace object */ activeWorkspace?: { name: string } | null @@ -120,6 +124,22 @@ export function WorkspaceHeader({ const [editingWorkspaceId, setEditingWorkspaceId] = useState(null) const [editingName, setEditingName] = useState('') const [isListRenaming, setIsListRenaming] = useState(false) + const [workspaceSearch, setWorkspaceSearch] = useState('') + const [highlightedIndex, setHighlightedIndex] = useState(0) + const searchInputRef = useRef(null) + const workspaceListRef = useRef(null) + + useEffect(() => { + const row = workspaceListRef.current?.querySelector( + `[data-workspace-row-idx="${highlightedIndex}"]` + ) + row?.scrollIntoView({ block: 'nearest' }) + }, [highlightedIndex]) + + const searchQuery = workspaceSearch.trim().toLowerCase() + const filteredWorkspaces = searchQuery + ? 
workspaces.filter((w) => w.name.toLowerCase().includes(searchQuery)) + : workspaces const [contextMenuPosition, setContextMenuPosition] = useState({ x: 0, y: 0 }) const [isContextMenuOpen, setIsContextMenuOpen] = useState(false) @@ -173,6 +193,15 @@ export function WorkspaceHeader({ } }, [isWorkspaceMenuOpen, editingWorkspaceId, editingName, workspaces, onRenameWorkspace]) + useEffect(() => { + if (isWorkspaceMenuOpen) { + setHighlightedIndex(0) + const id = requestAnimationFrame(() => searchInputRef.current?.focus()) + return () => cancelAnimationFrame(id) + } + setWorkspaceSearch('') + }, [isWorkspaceMenuOpen]) + const activeWorkspaceFull = workspaces.find((w) => w.id === workspaceId) || null const workspaceInitial = (() => { @@ -466,10 +495,57 @@ export function WorkspaceHeader({
    - -
    - {workspaces.map((workspace) => ( -
    + {workspaces.length > WORKSPACE_SEARCH_THRESHOLD && ( +
    + + { + setWorkspaceSearch(e.target.value) + setHighlightedIndex(0) + }} + onKeyDown={(e) => { + e.stopPropagation() + if (filteredWorkspaces.length === 0) return + if (e.key === 'ArrowDown') { + e.preventDefault() + setHighlightedIndex((i) => (i + 1) % filteredWorkspaces.length) + } else if (e.key === 'ArrowUp') { + e.preventDefault() + setHighlightedIndex( + (i) => (i - 1 + filteredWorkspaces.length) % filteredWorkspaces.length + ) + } else if (e.key === 'Enter') { + e.preventDefault() + const target = filteredWorkspaces[highlightedIndex] + if (target) onWorkspaceSwitch(target) + } + }} + className='h-auto flex-1 border-0 bg-transparent p-0 text-caption leading-none placeholder:text-[var(--text-tertiary)] focus-visible:ring-0 focus-visible:ring-offset-0' + /> +
    + )} + +
    + {filteredWorkspaces.length === 0 && workspaceSearch && ( +
    + No workspaces match "{workspaceSearch}" +
    + )} + {filteredWorkspaces.map((workspace, idx) => ( +
    setHighlightedIndex(idx)} + > {editingWorkspaceId === workspace.id ? (
    WORKSPACE_SEARCH_THRESHOLD && + workspace.id !== workspaceId && + menuOpenWorkspaceId !== workspace.id && + 'bg-[var(--surface-hover)]' )} - onClick={() => onWorkspaceSwitch(workspace)} + onClick={(e) => { + if (e.metaKey || e.ctrlKey) { + window.open(`/workspace/${workspace.id}/home`, '_blank') + return + } + onWorkspaceSwitch(workspace) + }} + onAuxClick={(e) => { + if (e.button === 1) { + e.preventDefault() + window.open(`/workspace/${workspace.id}/home`, '_blank') + } + }} onContextMenu={(e) => handleContextMenu(e, workspace)} > {workspace.name} diff --git a/apps/sim/blocks/blocks/confluence.ts b/apps/sim/blocks/blocks/confluence.ts index 8d90cfda4ca..e66efd70a08 100644 --- a/apps/sim/blocks/blocks/confluence.ts +++ b/apps/sim/blocks/blocks/confluence.ts @@ -128,6 +128,7 @@ export const ConfluenceBlock: BlockConfig = { title: 'Title', type: 'short-input', placeholder: 'Enter title for the page', + required: { field: 'operation', value: 'create' }, condition: { field: 'operation', value: ['create', 'update'] }, }, { @@ -135,6 +136,7 @@ export const ConfluenceBlock: BlockConfig = { title: 'Content', type: 'long-input', placeholder: 'Enter content for the page', + required: { field: 'operation', value: 'create' }, condition: { field: 'operation', value: ['create', 'update'] }, }, { @@ -766,6 +768,7 @@ export const ConfluenceV2Block: BlockConfig = { title: 'Title', type: 'short-input', placeholder: 'Enter title', + required: { field: 'operation', value: ['create', 'create_blogpost'] }, condition: { field: 'operation', value: ['create', 'update', 'create_blogpost', 'update_blogpost', 'update_space'], @@ -776,6 +779,7 @@ export const ConfluenceV2Block: BlockConfig = { title: 'Content', type: 'long-input', placeholder: 'Enter content', + required: { field: 'operation', value: ['create', 'create_blogpost'] }, condition: { field: 'operation', value: ['create', 'update', 'create_blogpost', 'update_blogpost'], diff --git a/apps/sim/blocks/blocks/jira.ts 
b/apps/sim/blocks/blocks/jira.ts index c6c93e4b3d2..b1eaf339d14 100644 --- a/apps/sim/blocks/blocks/jira.ts +++ b/apps/sim/blocks/blocks/jira.ts @@ -91,7 +91,7 @@ export const JiraBlock: BlockConfig = { placeholder: 'Select Jira project', dependsOn: ['credential', 'domain'], mode: 'basic', - required: { field: 'operation', value: ['write', 'update', 'read-bulk'] }, + required: { field: 'operation', value: ['write', 'read-bulk'] }, }, // Manual project ID input (advanced mode) { @@ -102,7 +102,7 @@ export const JiraBlock: BlockConfig = { placeholder: 'Enter Jira project ID', dependsOn: ['credential', 'domain'], mode: 'advanced', - required: { field: 'operation', value: ['write', 'update', 'read-bulk'] }, + required: { field: 'operation', value: ['write', 'read-bulk'] }, }, // Issue selector (basic mode) { @@ -218,9 +218,8 @@ export const JiraBlock: BlockConfig = { id: 'summary', title: 'New Summary', type: 'short-input', - required: true, + required: { field: 'operation', value: 'write' }, placeholder: 'Enter new summary for the issue', - dependsOn: ['projectId'], condition: { field: 'operation', value: ['update', 'write'] }, wandConfig: { enabled: true, @@ -240,7 +239,6 @@ Return ONLY the summary text - no explanations.`, title: 'New Description', type: 'long-input', placeholder: 'Enter new description for the issue', - dependsOn: ['projectId'], condition: { field: 'operation', value: ['update', 'write'] }, wandConfig: { enabled: true, @@ -279,7 +277,6 @@ Return ONLY the description text - no explanations.`, title: 'Assignee Account ID', type: 'short-input', placeholder: 'Assignee account ID (e.g., 5b109f2e9729b51b54dc274d)', - dependsOn: ['projectId'], condition: { field: 'operation', value: ['write', 'update'] }, }, { @@ -287,7 +284,6 @@ Return ONLY the description text - no explanations.`, title: 'Priority', type: 'short-input', placeholder: 'Priority ID or name (e.g., "10000" or "High")', - dependsOn: ['projectId'], condition: { field: 'operation', value: 
['write', 'update'] }, }, { @@ -295,7 +291,6 @@ Return ONLY the description text - no explanations.`, title: 'Labels', type: 'short-input', placeholder: 'Comma-separated labels (e.g., bug, urgent)', - dependsOn: ['projectId'], condition: { field: 'operation', value: ['write', 'update'] }, }, { @@ -303,7 +298,6 @@ Return ONLY the description text - no explanations.`, title: 'Due Date', type: 'short-input', placeholder: 'YYYY-MM-DD (e.g., 2024-12-31)', - dependsOn: ['projectId'], condition: { field: 'operation', value: ['write', 'update'] }, wandConfig: { enabled: true, @@ -332,7 +326,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n title: 'Environment', type: 'long-input', placeholder: 'Environment information (e.g., Production, Staging)', - dependsOn: ['projectId'], condition: { field: 'operation', value: ['write', 'update'] }, }, { @@ -340,7 +333,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n title: 'Custom Field ID', type: 'short-input', placeholder: 'e.g., customfield_10001 or 10001', - dependsOn: ['projectId'], condition: { field: 'operation', value: ['write', 'update'] }, }, { @@ -348,7 +340,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n title: 'Custom Field Value', type: 'short-input', placeholder: 'Value for the custom field', - dependsOn: ['projectId'], condition: { field: 'operation', value: ['write', 'update'] }, }, { @@ -356,7 +347,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n title: 'Components', type: 'short-input', placeholder: 'Comma-separated component names', - dependsOn: ['projectId'], condition: { field: 'operation', value: ['write', 'update'] }, }, { @@ -364,7 +354,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n title: 'Fix Versions', type: 'short-input', placeholder: 'Comma-separated fix version names', - dependsOn: ['projectId'], condition: { field: 
'operation', value: ['write', 'update'] }, }, { diff --git a/apps/sim/blocks/blocks/microsoft_excel.ts b/apps/sim/blocks/blocks/microsoft_excel.ts index 4b04368742c..8106d234251 100644 --- a/apps/sim/blocks/blocks/microsoft_excel.ts +++ b/apps/sim/blocks/blocks/microsoft_excel.ts @@ -68,6 +68,13 @@ export const MicrosoftExcelBlock: BlockConfig = { dependsOn: ['credential'], mode: 'basic', }, + { + id: 'driveId', + title: 'Drive ID (SharePoint)', + type: 'short-input', + placeholder: 'Leave empty for OneDrive, or enter drive ID for SharePoint', + mode: 'advanced', + }, { id: 'manualSpreadsheetId', title: 'Spreadsheet ID', @@ -249,9 +256,17 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, } }, params: (params) => { - const { oauthCredential, values, spreadsheetId, tableName, worksheetName, ...rest } = params + const { + oauthCredential, + values, + spreadsheetId, + tableName, + worksheetName, + driveId, + siteId: _siteId, + ...rest + } = params - // Use canonical param ID (raw subBlock IDs are deleted after serialization) const effectiveSpreadsheetId = spreadsheetId ? String(spreadsheetId).trim() : '' let parsedValues @@ -276,6 +291,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, const baseParams = { ...rest, spreadsheetId: effectiveSpreadsheetId, + driveId: driveId ? 
String(driveId).trim() : undefined, values: parsedValues, oauthCredential, } @@ -302,6 +318,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, operation: { type: 'string', description: 'Operation to perform' }, oauthCredential: { type: 'string', description: 'Microsoft Excel access token' }, spreadsheetId: { type: 'string', description: 'Spreadsheet identifier (canonical param)' }, + driveId: { type: 'string', description: 'Drive ID for SharePoint document libraries' }, range: { type: 'string', description: 'Cell range' }, tableName: { type: 'string', description: 'Table name' }, worksheetName: { type: 'string', description: 'Worksheet name' }, @@ -377,6 +394,47 @@ export const MicrosoftExcelV2Block: BlockConfig = { placeholder: 'Enter credential ID', required: true, }, + // File Source selector (both modes) + { + id: 'fileSource', + title: 'File Source', + type: 'dropdown', + options: [ + { label: 'OneDrive', id: 'onedrive' }, + { label: 'SharePoint', id: 'sharepoint' }, + ], + value: () => 'onedrive', + }, + // SharePoint Site Selector (basic mode, only when SharePoint is selected) + { + id: 'siteSelector', + title: 'SharePoint Site', + type: 'file-selector', + canonicalParamId: 'siteId', + serviceId: 'sharepoint', + selectorKey: 'sharepoint.sites', + requiredScopes: [], + placeholder: 'Select a SharePoint site', + dependsOn: ['credential', 'fileSource'], + condition: { field: 'fileSource', value: 'sharepoint' }, + required: { field: 'fileSource', value: 'sharepoint' }, + mode: 'basic', + }, + // SharePoint Drive Selector (basic mode, only when SharePoint is selected) + { + id: 'driveSelector', + title: 'Document Library', + type: 'file-selector', + canonicalParamId: 'driveId', + serviceId: 'microsoft-excel', + selectorKey: 'microsoft.excel.drives', + selectorAllowSearch: false, + placeholder: 'Select a document library', + dependsOn: ['credential', 'siteSelector', 'fileSource'], + condition: { field: 'fileSource', value: 
'sharepoint' }, + required: { field: 'fileSource', value: 'sharepoint' }, + mode: 'basic', + }, // Spreadsheet Selector (basic mode) { id: 'spreadsheetId', @@ -388,9 +446,20 @@ export const MicrosoftExcelV2Block: BlockConfig = { requiredScopes: [], mimeType: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', placeholder: 'Select a spreadsheet', - dependsOn: ['credential'], + dependsOn: { all: ['credential', 'fileSource'], any: ['credential', 'driveSelector'] }, mode: 'basic', }, + // Drive ID for SharePoint (advanced mode, only when SharePoint is selected) + { + id: 'manualDriveId', + title: 'Drive ID', + type: 'short-input', + canonicalParamId: 'driveId', + placeholder: 'Enter the SharePoint drive ID', + condition: { field: 'fileSource', value: 'sharepoint' }, + dependsOn: ['fileSource'], + mode: 'advanced', + }, // Manual Spreadsheet ID (advanced mode) { id: 'manualSpreadsheetId', @@ -398,7 +467,7 @@ export const MicrosoftExcelV2Block: BlockConfig = { type: 'short-input', canonicalParamId: 'spreadsheetId', placeholder: 'Enter spreadsheet ID', - dependsOn: ['credential'], + dependsOn: { all: ['credential'], any: ['credential', 'manualDriveId'] }, mode: 'advanced', }, // Sheet Name Selector (basic mode) @@ -412,7 +481,10 @@ export const MicrosoftExcelV2Block: BlockConfig = { selectorAllowSearch: false, placeholder: 'Select a sheet', required: true, - dependsOn: { all: ['credential'], any: ['spreadsheetId', 'manualSpreadsheetId'] }, + dependsOn: { + all: ['credential'], + any: ['spreadsheetId', 'manualSpreadsheetId', 'driveSelector'], + }, mode: 'basic', }, // Manual Sheet Name (advanced mode) @@ -423,7 +495,10 @@ export const MicrosoftExcelV2Block: BlockConfig = { canonicalParamId: 'sheetName', placeholder: 'Name of the sheet/tab (e.g., Sheet1)', required: true, - dependsOn: ['credential'], + dependsOn: { + all: ['credential'], + any: ['credential', 'manualDriveId'], + }, mode: 'advanced', }, // Cell Range (optional for read/write) @@ -514,11 
+589,20 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, fallbackToolId: 'microsoft_excel_read_v2', }), params: (params) => { - const { oauthCredential, values, spreadsheetId, sheetName, cellRange, ...rest } = params + const { + oauthCredential, + values, + spreadsheetId, + sheetName, + cellRange, + driveId, + siteId: _siteId, + fileSource: _fileSource, + ...rest + } = params const parsedValues = values ? JSON.parse(values as string) : undefined - // Use canonical param IDs (raw subBlock IDs are deleted after serialization) const effectiveSpreadsheetId = spreadsheetId ? String(spreadsheetId).trim() : '' const effectiveSheetName = sheetName ? String(sheetName).trim() : '' @@ -535,6 +619,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, spreadsheetId: effectiveSpreadsheetId, sheetName: effectiveSheetName, cellRange: cellRange ? (cellRange as string).trim() : undefined, + driveId: driveId ? String(driveId).trim() : undefined, values: parsedValues, oauthCredential, } @@ -543,7 +628,10 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, }, inputs: { operation: { type: 'string', description: 'Operation to perform' }, + fileSource: { type: 'string', description: 'File source (onedrive or sharepoint)' }, oauthCredential: { type: 'string', description: 'Microsoft Excel access token' }, + siteId: { type: 'string', description: 'SharePoint site ID (used for drive/file browsing)' }, + driveId: { type: 'string', description: 'Drive ID for SharePoint document libraries' }, spreadsheetId: { type: 'string', description: 'Spreadsheet identifier (canonical param)' }, sheetName: { type: 'string', description: 'Name of the sheet/tab (canonical param)' }, cellRange: { type: 'string', description: 'Cell range (e.g., A1:D10)' }, diff --git a/apps/sim/components/emails/notifications/workflow-notification-email.tsx b/apps/sim/components/emails/notifications/workflow-notification-email.tsx index 
ad08e608bbb..3389c8cad37 100644 --- a/apps/sim/components/emails/notifications/workflow-notification-email.tsx +++ b/apps/sim/components/emails/notifications/workflow-notification-email.tsx @@ -70,7 +70,7 @@ export function WorkflowNotificationEmail({ const message = alertReason ? 'An alert was triggered for your workflow.' : isError - ? 'Your workflow execution failed.' + ? 'Your workflow run failed.' : 'Your workflow completed successfully.' return ( @@ -102,7 +102,7 @@ export function WorkflowNotificationEmail({ - View Execution Log + View Run Log {rateLimits && (rateLimits.sync || rateLimits.async) ? ( diff --git a/apps/sim/components/emcn/icons/play.tsx b/apps/sim/components/emcn/icons/play.tsx index cf22598ba03..990a651a71b 100644 --- a/apps/sim/components/emcn/icons/play.tsx +++ b/apps/sim/components/emcn/icons/play.tsx @@ -32,7 +32,7 @@ export function PlayOutline(props: SVGProps) { ) { aria-hidden='true' {...props} > - + ) } diff --git a/apps/sim/content/blog/copilot/index.mdx b/apps/sim/content/blog/copilot/index.mdx index 98add964847..5e65575549d 100644 --- a/apps/sim/content/blog/copilot/index.mdx +++ b/apps/sim/content/blog/copilot/index.mdx @@ -12,7 +12,7 @@ ogImage: /blog/copilot/cover.png ogAlt: 'Sim Copilot technical overview' about: ['AI Assistants', 'Agentic Workflows', 'Retrieval Augmented Generation'] timeRequired: PT7M -canonical: https://sim.ai/blog/copilot +canonical: https://www.sim.ai/blog/copilot featured: false draft: true --- diff --git a/apps/sim/content/blog/emcn/index.mdx b/apps/sim/content/blog/emcn/index.mdx index 9dddba8244f..8b427baf27d 100644 --- a/apps/sim/content/blog/emcn/index.mdx +++ b/apps/sim/content/blog/emcn/index.mdx @@ -12,7 +12,7 @@ ogImage: /blog/emcn/cover.png ogAlt: 'Emcn design system cover' about: ['Design Systems', 'Component Libraries', 'Design Tokens', 'Accessibility'] timeRequired: PT6M -canonical: https://sim.ai/blog/emcn +canonical: https://www.sim.ai/blog/emcn featured: false draft: true --- diff --git 
a/apps/sim/content/blog/enterprise/index.mdx b/apps/sim/content/blog/enterprise/index.mdx index 3f57456617a..81bf3acdad4 100644 --- a/apps/sim/content/blog/enterprise/index.mdx +++ b/apps/sim/content/blog/enterprise/index.mdx @@ -12,7 +12,7 @@ ogImage: /blog/enterprise/cover.png ogAlt: 'Sim Enterprise features overview' about: ['Enterprise Software', 'Security', 'Compliance', 'Self-Hosting'] timeRequired: PT10M -canonical: https://sim.ai/blog/enterprise +canonical: https://www.sim.ai/blog/enterprise featured: true draft: false --- diff --git a/apps/sim/content/blog/executor/index.mdx b/apps/sim/content/blog/executor/index.mdx index 61c9407ee44..01b410ba57d 100644 --- a/apps/sim/content/blog/executor/index.mdx +++ b/apps/sim/content/blog/executor/index.mdx @@ -12,7 +12,7 @@ ogImage: /blog/executor/cover.png ogAlt: 'Sim Executor technical overview' about: ['Execution', 'Workflow Orchestration'] timeRequired: PT12M -canonical: https://sim.ai/blog/executor +canonical: https://www.sim.ai/blog/executor featured: false draft: false --- diff --git a/apps/sim/content/blog/mothership/index.mdx b/apps/sim/content/blog/mothership/index.mdx index 5205c6023df..ff01969d2be 100644 --- a/apps/sim/content/blog/mothership/index.mdx +++ b/apps/sim/content/blog/mothership/index.mdx @@ -8,11 +8,11 @@ authors: - emir readingTime: 10 tags: [Release, Mothership, Tables, Knowledge Base, Connectors, RAG, Sim] -ogImage: /blog/mothership/cover.png +ogImage: /blog/mothership/cover.jpg ogAlt: 'Sim v0.6 release announcement' about: ['AI Agents', 'Workflow Automation', 'Developer Tools'] timeRequired: PT10M -canonical: https://sim.ai/blog/mothership +canonical: https://www.sim.ai/blog/mothership featured: true draft: false --- diff --git a/apps/sim/content/blog/multiplayer/index.mdx b/apps/sim/content/blog/multiplayer/index.mdx index 71a48fa89fd..5d32e444a1e 100644 --- a/apps/sim/content/blog/multiplayer/index.mdx +++ b/apps/sim/content/blog/multiplayer/index.mdx @@ -9,7 +9,7 @@ authors: 
readingTime: 12 tags: [Multiplayer, Realtime, Collaboration, WebSockets, Architecture] ogImage: /blog/multiplayer/cover.png -canonical: https://sim.ai/blog/multiplayer +canonical: https://www.sim.ai/blog/multiplayer draft: false --- diff --git a/apps/sim/content/blog/openai-vs-n8n-vs-sim/index.mdx b/apps/sim/content/blog/openai-vs-n8n-vs-sim/index.mdx index 9026829f56f..ea21ba1fc34 100644 --- a/apps/sim/content/blog/openai-vs-n8n-vs-sim/index.mdx +++ b/apps/sim/content/blog/openai-vs-n8n-vs-sim/index.mdx @@ -9,7 +9,7 @@ authors: readingTime: 9 tags: [AI Agents, Workflow Automation, OpenAI AgentKit, n8n, Sim, MCP] ogImage: /blog/openai-vs-n8n-vs-sim/workflow.png -canonical: https://sim.ai/blog/openai-vs-n8n-vs-sim +canonical: https://www.sim.ai/blog/openai-vs-n8n-vs-sim draft: false --- diff --git a/apps/sim/content/blog/series-a/index.mdx b/apps/sim/content/blog/series-a/index.mdx index e029c884a28..ee119fb7170 100644 --- a/apps/sim/content/blog/series-a/index.mdx +++ b/apps/sim/content/blog/series-a/index.mdx @@ -13,7 +13,7 @@ ogImage: /blog/series-a/cover.png ogAlt: 'Sim team photo in front of neon logo' about: ['Artificial Intelligence', 'Agentic Workflows', 'Startups', 'Funding'] timeRequired: PT4M -canonical: https://sim.ai/blog/series-a +canonical: https://www.sim.ai/blog/series-a featured: true draft: false --- diff --git a/apps/sim/content/blog/v0-5/index.mdx b/apps/sim/content/blog/v0-5/index.mdx index b97609f41c7..b4b80137580 100644 --- a/apps/sim/content/blog/v0-5/index.mdx +++ b/apps/sim/content/blog/v0-5/index.mdx @@ -12,7 +12,7 @@ ogImage: /blog/v0-5/cover.png ogAlt: 'Sim v0.5 release announcement' about: ['AI Agents', 'Workflow Automation', 'Developer Tools'] timeRequired: PT8M -canonical: https://sim.ai/blog/v0-5 +canonical: https://www.sim.ai/blog/v0-5 featured: true draft: false --- diff --git a/apps/sim/ee/whitelabeling/metadata.ts b/apps/sim/ee/whitelabeling/metadata.ts index cfaefd63f47..1048f56ed62 100644 --- 
a/apps/sim/ee/whitelabeling/metadata.ts +++ b/apps/sim/ee/whitelabeling/metadata.ts @@ -1,5 +1,5 @@ import type { Metadata } from 'next' -import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBaseUrl, SITE_URL } from '@/lib/core/utils/urls' import { getBrandConfig } from '@/ee/whitelabeling/branding' /** @@ -150,7 +150,7 @@ export function generateStructuredData() { creator: { '@type': 'Organization', name: 'Sim', - url: 'https://sim.ai', + url: SITE_URL, }, featureList: [ 'AI Workspace for Teams', diff --git a/apps/sim/hooks/queries/logs.ts b/apps/sim/hooks/queries/logs.ts index 835b5d7f97c..cab3f63ecbd 100644 --- a/apps/sim/hooks/queries/logs.ts +++ b/apps/sim/hooks/queries/logs.ts @@ -292,9 +292,9 @@ export function useCancelExecution() { const res = await fetch(`/api/workflows/${workflowId}/executions/${executionId}/cancel`, { method: 'POST', }) - if (!res.ok) throw new Error('Failed to cancel execution') + if (!res.ok) throw new Error('Failed to cancel run') const data = await res.json() - if (!data.success) throw new Error('Failed to cancel execution') + if (!data.success) throw new Error('Failed to cancel run') return data }, onMutate: async ({ executionId }) => { diff --git a/apps/sim/hooks/queries/tasks.ts b/apps/sim/hooks/queries/tasks.ts index e45429d613c..ebfd65bd38f 100644 --- a/apps/sim/hooks/queries/tasks.ts +++ b/apps/sim/hooks/queries/tasks.ts @@ -485,21 +485,23 @@ export function useRemoveChatResource(chatId?: string) { onMutate: async ({ resourceType, resourceId }) => { if (!chatId) return await queryClient.cancelQueries({ queryKey: taskKeys.detail(chatId) }) - const previous = queryClient.getQueryData(taskKeys.detail(chatId)) - if (previous) { - queryClient.setQueryData(taskKeys.detail(chatId), { - ...previous, - resources: previous.resources.filter( - (r) => !(r.type === resourceType && r.id === resourceId) - ), - }) - } - return { previous } + const removed: TaskChatHistory['resources'] = [] + 
queryClient.setQueryData(taskKeys.detail(chatId), (prev) => { + if (!prev) return prev + const next: TaskChatHistory['resources'] = [] + for (const r of prev.resources) { + if (r.type === resourceType && r.id === resourceId) removed.push(r) + else next.push(r) + } + return removed.length > 0 ? { ...prev, resources: next } : prev + }) + return { removed } }, onError: (_err, _variables, context) => { - if (context?.previous && chatId) { - queryClient.setQueryData(taskKeys.detail(chatId), context.previous) - } + if (!chatId || !context?.removed.length) return + queryClient.setQueryData(taskKeys.detail(chatId), (prev) => + prev ? { ...prev, resources: [...prev.resources, ...context.removed] } : prev + ) }, onSettled: () => { if (chatId) { diff --git a/apps/sim/hooks/selectors/registry.ts b/apps/sim/hooks/selectors/registry.ts index 0fe0d1b84bb..6b053994257 100644 --- a/apps/sim/hooks/selectors/registry.ts +++ b/apps/sim/hooks/selectors/registry.ts @@ -1504,6 +1504,7 @@ const registry: Record = { 'microsoft.excel.sheets', context.oauthCredential ?? 'none', context.spreadsheetId ?? 'none', + context.driveId ?? 'none', ], enabled: ({ context }) => Boolean(context.oauthCredential && context.spreadsheetId), fetchList: async ({ context }: SelectorQueryArgs) => { @@ -1517,6 +1518,7 @@ const registry: Record = { searchParams: { credentialId, spreadsheetId: context.spreadsheetId, + driveId: context.driveId, workflowId: context.workflowId, }, } @@ -1527,6 +1529,54 @@ const registry: Record = { })) }, }, + 'microsoft.excel.drives': { + key: 'microsoft.excel.drives', + staleTime: SELECTOR_STALE, + getQueryKey: ({ context }: SelectorQueryArgs) => [ + 'selectors', + 'microsoft.excel.drives', + context.oauthCredential ?? 'none', + context.siteId ?? 
'none', + ], + enabled: ({ context }) => Boolean(context.oauthCredential && context.siteId), + fetchList: async ({ context }: SelectorQueryArgs) => { + const credentialId = ensureCredential(context, 'microsoft.excel.drives') + if (!context.siteId) { + throw new Error('Missing site ID for microsoft.excel.drives selector') + } + const body = JSON.stringify({ + credential: credentialId, + workflowId: context.workflowId, + siteId: context.siteId, + }) + const data = await fetchJson<{ drives: { id: string; name: string }[] }>( + '/api/tools/microsoft_excel/drives', + { method: 'POST', body } + ) + return (data.drives || []).map((drive) => ({ + id: drive.id, + label: drive.name, + })) + }, + fetchById: async ({ context, detailId }: SelectorQueryArgs) => { + if (!detailId || !context.siteId) return null + const credentialId = ensureCredential(context, 'microsoft.excel.drives') + const data = await fetchJson<{ drive: { id: string; name: string } }>( + '/api/tools/microsoft_excel/drives', + { + method: 'POST', + body: JSON.stringify({ + credential: credentialId, + workflowId: context.workflowId, + siteId: context.siteId, + driveId: detailId, + }), + } + ) + if (!data.drive) return null + return { id: data.drive.id, label: data.drive.name } + }, + }, 'microsoft.excel': { key: 'microsoft.excel', staleTime: SELECTOR_STALE, @@ -1534,6 +1584,7 @@ const registry: Record = { 'selectors', 'microsoft.excel', context.oauthCredential ?? 'none', + context.driveId ?? 'none', search ?? 
'', ], enabled: ({ context }) => Boolean(context.oauthCredential), @@ -1545,6 +1596,7 @@ const registry: Record = { searchParams: { credentialId, query: search, + driveId: context.driveId, workflowId: context.workflowId, }, } diff --git a/apps/sim/hooks/selectors/types.ts b/apps/sim/hooks/selectors/types.ts index bd5bcac547b..c4423b52e33 100644 --- a/apps/sim/hooks/selectors/types.ts +++ b/apps/sim/hooks/selectors/types.ts @@ -40,6 +40,7 @@ export type SelectorKey = | 'onedrive.folders' | 'sharepoint.sites' | 'microsoft.excel' + | 'microsoft.excel.drives' | 'microsoft.excel.sheets' | 'microsoft.word' | 'microsoft.planner' @@ -75,6 +76,7 @@ export interface SelectorContext { siteId?: string collectionId?: string spreadsheetId?: string + driveId?: string excludeWorkflowId?: string baseId?: string datasetId?: string diff --git a/apps/sim/hooks/use-streaming-text.ts b/apps/sim/hooks/use-streaming-text.ts deleted file mode 100644 index 369977a0f64..00000000000 --- a/apps/sim/hooks/use-streaming-text.ts +++ /dev/null @@ -1,100 +0,0 @@ -'use client' - -import { useEffect, useRef, useState } from 'react' - -const TICK_MS = 16 -const MIN_CHARS_PER_TICK = 3 -const CHASE_FACTOR = 0.3 -const RESUME_IDLE_MS = 140 -const RESUME_RAMP_MS = 180 - -function easeOutCubic(t: number): number { - const clamped = Math.max(0, Math.min(1, t)) - return 1 - (1 - clamped) ** 3 -} - -/** - * Progressively reveals streaming text character-by-character at a steady - * rate regardless of how the data arrives. - * - * Small deltas (individual tokens) reveal at the base rate of 3 chars per - * 16 ms. Large gaps (burst arrivals) catch up exponentially via - * CHASE_FACTOR so the reveal never falls far behind. - * - * When `isStreaming` is false the target is returned directly. 
- */ -export function useStreamingText(target: string, isStreaming: boolean): string { - const [displayed, setDisplayed] = useState(target) - const revealedRef = useRef(target) - const targetRef = useRef(target) - const lastTargetLengthRef = useRef(target.length) - const lastTargetChangeAtRef = useRef(Date.now()) - const resumeStartedAtRef = useRef(null) - - targetRef.current = target - - useEffect(() => { - const now = Date.now() - const previousLength = lastTargetLengthRef.current - const nextLength = target.length - - if (nextLength > previousLength) { - const idleFor = now - lastTargetChangeAtRef.current - if (isStreaming && idleFor >= RESUME_IDLE_MS) { - resumeStartedAtRef.current = now - } - lastTargetChangeAtRef.current = now - } else if (nextLength < previousLength) { - lastTargetChangeAtRef.current = now - resumeStartedAtRef.current = null - } - - lastTargetLengthRef.current = nextLength - }, [target, isStreaming]) - - useEffect(() => { - if (isStreaming) return - if (revealedRef.current === target) return - revealedRef.current = target - lastTargetChangeAtRef.current = Date.now() - lastTargetLengthRef.current = target.length - resumeStartedAtRef.current = null - setDisplayed(target) - }, [target, isStreaming]) - - useEffect(() => { - if (!isStreaming) return - - if (targetRef.current.length < revealedRef.current.length) { - revealedRef.current = '' - } - - const timer = setInterval(() => { - const now = Date.now() - const current = revealedRef.current - const tgt = targetRef.current - if (current.length >= tgt.length) return - - const gap = tgt.length - current.length - const normalChars = Math.max(MIN_CHARS_PER_TICK, Math.ceil(gap * CHASE_FACTOR)) - - let chars = normalChars - const resumeStartedAt = resumeStartedAtRef.current - if (resumeStartedAt !== null) { - const progress = easeOutCubic((now - resumeStartedAt) / RESUME_RAMP_MS) - chars = Math.max(MIN_CHARS_PER_TICK, Math.ceil(normalChars * progress)) - if (progress >= 1) { - 
resumeStartedAtRef.current = null - } - } - - chars = Math.min(gap, chars) - revealedRef.current = tgt.slice(0, current.length + chars) - setDisplayed(revealedRef.current) - }, TICK_MS) - - return () => clearInterval(timer) - }, [isStreaming]) - - return displayed -} diff --git a/apps/sim/lib/blog/seo.ts b/apps/sim/lib/blog/seo.ts index d7e7693158c..a3fbd3f520e 100644 --- a/apps/sim/lib/blog/seo.ts +++ b/apps/sim/lib/blog/seo.ts @@ -1,5 +1,6 @@ import type { Metadata } from 'next' import type { BlogMeta } from '@/lib/blog/schema' +import { SITE_URL } from '@/lib/core/utils/urls' export function buildPostMetadata(post: BlogMeta): Metadata { const base = new URL(post.canonical) @@ -85,10 +86,10 @@ export function buildArticleJsonLd(post: BlogMeta) { publisher: { '@type': 'Organization', name: 'Sim', - url: 'https://sim.ai', + url: SITE_URL, logo: { '@type': 'ImageObject', - url: 'https://sim.ai/logo/primary/medium.png', + url: `${SITE_URL}/logo/primary/medium.png`, }, }, mainEntityOfPage: { @@ -112,8 +113,8 @@ export function buildBreadcrumbJsonLd(post: BlogMeta) { return { '@type': 'BreadcrumbList', itemListElement: [ - { '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' }, - { '@type': 'ListItem', position: 2, name: 'Blog', item: 'https://sim.ai/blog' }, + { '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL }, + { '@type': 'ListItem', position: 2, name: 'Blog', item: `${SITE_URL}/blog` }, { '@type': 'ListItem', position: 3, name: post.title, item: post.canonical }, ], } @@ -150,22 +151,22 @@ export function buildCollectionPageJsonLd() { '@context': 'https://schema.org', '@type': 'CollectionPage', name: 'Sim Blog', - url: 'https://sim.ai/blog', + url: `${SITE_URL}/blog`, description: 'Announcements, insights, and guides for building AI agents.', publisher: { '@type': 'Organization', name: 'Sim', - url: 'https://sim.ai', + url: SITE_URL, logo: { '@type': 'ImageObject', - url: 'https://sim.ai/logo/primary/medium.png', + url: 
`${SITE_URL}/logo/primary/medium.png`, }, }, inLanguage: 'en-US', isPartOf: { '@type': 'WebSite', name: 'Sim', - url: 'https://sim.ai', + url: SITE_URL, }, } } diff --git a/apps/sim/lib/copilot/generated/tool-catalog-v1.ts b/apps/sim/lib/copilot/generated/tool-catalog-v1.ts index 3f9136e3aa6..2af7b1d660e 100644 --- a/apps/sim/lib/copilot/generated/tool-catalog-v1.ts +++ b/apps/sim/lib/copilot/generated/tool-catalog-v1.ts @@ -17,6 +17,7 @@ export interface ToolCatalogEntry { | 'create_job' | 'create_workflow' | 'create_workspace_mcp_server' + | 'debug' | 'delete_file' | 'delete_folder' | 'delete_workflow' @@ -70,6 +71,7 @@ export interface ToolCatalogEntry { | 'respond' | 'restore_resource' | 'revert_to_version' + | 'run' | 'run_block' | 'run_from_block' | 'run_workflow' @@ -105,6 +107,7 @@ export interface ToolCatalogEntry { | 'create_job' | 'create_workflow' | 'create_workspace_mcp_server' + | 'debug' | 'delete_file' | 'delete_folder' | 'delete_workflow' @@ -158,6 +161,7 @@ export interface ToolCatalogEntry { | 'respond' | 'restore_resource' | 'revert_to_version' + | 'run' | 'run_block' | 'run_from_block' | 'run_workflow' @@ -187,11 +191,13 @@ export interface ToolCatalogEntry { subagentId?: | 'agent' | 'auth' + | 'debug' | 'deploy' | 'file' | 'job' | 'knowledge' | 'research' + | 'run' | 'superagent' | 'table' | 'workflow' @@ -444,6 +450,31 @@ export const CreateWorkspaceMcpServer: ToolCatalogEntry = { requiredPermission: 'admin', } +export const Debug: ToolCatalogEntry = { + id: 'debug', + name: 'debug', + route: 'subagent', + mode: 'async', + parameters: { + properties: { + context: { + description: + 'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.', + type: 'string', + }, + request: { + description: + 'What to debug. 
Include error messages, block IDs, and any context about the failure.', + type: 'string', + }, + }, + required: ['request'], + type: 'object', + }, + subagentId: 'debug', + internal: true, +} + export const DeleteFile: ToolCatalogEntry = { id: 'delete_file', name: 'delete_file', @@ -2039,7 +2070,8 @@ export const Read: ToolCatalogEntry = { }, path: { type: 'string', - description: "Path to the file to read (e.g. 'workflows/My Workflow/state.json').", + description: + "Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json').", }, }, required: ['path'], @@ -2231,6 +2263,26 @@ export const RevertToVersion: ToolCatalogEntry = { requiredPermission: 'admin', } +export const Run: ToolCatalogEntry = { + id: 'run', + name: 'run', + route: 'subagent', + mode: 'async', + parameters: { + properties: { + context: { + description: 'Pre-gathered context: workflow state, block IDs, input requirements.', + type: 'string', + }, + request: { description: 'What to run or what logs to check.', type: 'string' }, + }, + required: ['request'], + type: 'object', + }, + subagentId: 'run', + internal: true, +} + export const RunBlock: ToolCatalogEntry = { id: 'run_block', name: 'run_block', @@ -3264,6 +3316,7 @@ export const TOOL_CATALOG: Record = { [CreateJob.id]: CreateJob, [CreateWorkflow.id]: CreateWorkflow, [CreateWorkspaceMcpServer.id]: CreateWorkspaceMcpServer, + [Debug.id]: Debug, [DeleteFile.id]: DeleteFile, [DeleteFolder.id]: DeleteFolder, [DeleteWorkflow.id]: DeleteWorkflow, @@ -3317,6 +3370,7 @@ export const TOOL_CATALOG: Record = { [Respond.id]: Respond, [RestoreResource.id]: RestoreResource, [RevertToVersion.id]: RevertToVersion, + [Run.id]: Run, [RunBlock.id]: RunBlock, [RunFromBlock.id]: RunFromBlock, [RunWorkflow.id]: RunWorkflow, diff --git a/apps/sim/lib/copilot/generated/tool-schemas-v1.ts b/apps/sim/lib/copilot/generated/tool-schemas-v1.ts index ac4b51df2c2..78e624c8473 100644 --- 
a/apps/sim/lib/copilot/generated/tool-schemas-v1.ts +++ b/apps/sim/lib/copilot/generated/tool-schemas-v1.ts @@ -266,6 +266,25 @@ export const TOOL_RUNTIME_SCHEMAS: Record = { }, resultSchema: undefined, }, + debug: { + parameters: { + properties: { + context: { + description: + 'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.', + type: 'string', + }, + request: { + description: + 'What to debug. Include error messages, block IDs, and any context about the failure.', + type: 'string', + }, + }, + required: ['request'], + type: 'object', + }, + resultSchema: undefined, + }, delete_file: { parameters: { type: 'object', @@ -1872,7 +1891,8 @@ export const TOOL_RUNTIME_SCHEMAS: Record = { }, path: { type: 'string', - description: "Path to the file to read (e.g. 'workflows/My Workflow/state.json').", + description: + "Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json').", }, }, required: ['path'], @@ -2070,6 +2090,23 @@ export const TOOL_RUNTIME_SCHEMAS: Record = { }, resultSchema: undefined, }, + run: { + parameters: { + properties: { + context: { + description: 'Pre-gathered context: workflow state, block IDs, input requirements.', + type: 'string', + }, + request: { + description: 'What to run or what logs to check.', + type: 'string', + }, + }, + required: ['request'], + type: 'object', + }, + resultSchema: undefined, + }, run_block: { parameters: { type: 'object', diff --git a/apps/sim/lib/copilot/request/subagent.ts b/apps/sim/lib/copilot/request/subagent.ts index 43f56abe120..d9403094698 100644 --- a/apps/sim/lib/copilot/request/subagent.ts +++ b/apps/sim/lib/copilot/request/subagent.ts @@ -1,4 +1,5 @@ import { createLogger } from '@sim/logger' +import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { MothershipStreamV1EventType, 
@@ -16,8 +17,10 @@ import type { } from '@/lib/copilot/request/types' import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context' import { env } from '@/lib/core/config/env' +import { isHosted } from '@/lib/core/config/feature-flags' import { generateId } from '@/lib/core/utils/uuid' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { getWorkflowById } from '@/lib/workflows/utils' const logger = createLogger('CopilotSubagentOrchestrator') @@ -49,10 +52,40 @@ export async function orchestrateSubagentStream( options: SubagentOrchestratorOptions ): Promise { const { userId, workflowId, workspaceId, userPermission } = options - const execContext = await buildExecutionContext(userId, workflowId, workspaceId) + const chatId = + (typeof requestPayload.chatId === 'string' && requestPayload.chatId) || generateId() + const execContext = await buildExecutionContext(userId, workflowId, workspaceId, chatId) + let resolvedWorkflowName = + typeof requestPayload.workflowName === 'string' ? requestPayload.workflowName : undefined + let resolvedWorkspaceId = + execContext.workspaceId || + (typeof requestPayload.workspaceId === 'string' ? requestPayload.workspaceId : workspaceId) + + if (workflowId && (!resolvedWorkflowName || !resolvedWorkspaceId)) { + const workflow = await getWorkflowById(workflowId) + resolvedWorkflowName ||= workflow?.name || undefined + resolvedWorkspaceId ||= workflow?.workspaceId || undefined + } + + let resolvedWorkspaceContext = + typeof requestPayload.workspaceContext === 'string' + ? requestPayload.workspaceContext + : undefined + if (!resolvedWorkspaceContext && resolvedWorkspaceId) { + try { + resolvedWorkspaceContext = await generateWorkspaceContext(resolvedWorkspaceId, userId) + } catch (error) { + logger.warn('Failed to generate workspace context for subagent request', { + agentId, + workspaceId: resolvedWorkspaceId, + error: error instanceof Error ? 
error.message : String(error), + }) + } + } const msgId = requestPayload?.messageId const context = createStreamingContext({ + chatId, messageId: typeof msgId === 'string' ? msgId : generateId(), }) @@ -69,8 +102,13 @@ export async function orchestrateSubagentStream( }, body: JSON.stringify({ ...requestPayload, + chatId, userId, stream: true, + ...(resolvedWorkflowName ? { workflowName: resolvedWorkflowName } : {}), + ...(resolvedWorkspaceId ? { workspaceId: resolvedWorkspaceId } : {}), + ...(resolvedWorkspaceContext ? { workspaceContext: resolvedWorkspaceContext } : {}), + isHosted, ...(userPermission ? { userPermission } : {}), }), }, @@ -135,16 +173,18 @@ function normalizeStructuredResult(data: unknown): SubagentOrchestratorResult['s async function buildExecutionContext( userId: string, workflowId?: string, - workspaceId?: string + workspaceId?: string, + chatId?: string ): Promise { if (workflowId) { - return prepareExecutionContext(userId, workflowId) + return prepareExecutionContext(userId, workflowId, chatId, { workspaceId }) } const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) return { userId, workflowId: workflowId || '', workspaceId, + chatId, decryptedEnvVars, } } diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index 52ba61daede..65e02d1e94b 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -284,16 +284,18 @@ CAN DO: - Configure block settings and connections - Set environment variables and workflow variables - Move, rename, delete workflows and folders +- Run or inspect workflows through the nested run/debug specialists when validation is needed +- Delegate deployment or auth setup to the nested specialists when needed CANNOT DO: -- Run or test workflows (use sim_test separately) -- Deploy workflows (use sim_deploy separately) +- Replace dedicated testing flows like sim_test when you want a standalone 
execution-only pass +- Replace dedicated deploy flows like sim_deploy when you want deployment as a separate step WORKFLOW: 1. Call create_workflow to get a workflowId (for new workflows) 2. Call sim_workflow with the request and workflowId -3. Workflow agent gathers info and builds in one pass -4. Call sim_test to verify it works +3. Workflow agent gathers info, builds, and can delegate run/debug/auth/deploy help in one pass +4. Call sim_test when you want a dedicated execution-only verification pass 5. Optionally call sim_deploy to make it externally accessible`, inputSchema: { type: 'object', @@ -375,7 +377,7 @@ ALSO CAN: }, { name: 'sim_test', - agentId: 'test', + agentId: 'run', description: `Run a workflow and verify its outputs. Works on both deployed and undeployed (draft) workflows. Use after building to verify correctness. Supports full and partial execution: @@ -476,7 +478,7 @@ Supports full and partial execution: name: 'sim_info', agentId: 'info', description: - "Inspect a workflow's blocks, connections, outputs, variables, and metadata. Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. Always provide workflowId to scope results to a specific workflow.", + "Inspect a workflow's blocks, connections, outputs, variables, and metadata. Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. Provide workflowId when you want results scoped to a specific workflow.", inputSchema: { type: 'object', properties: { @@ -488,22 +490,6 @@ Supports full and partial execution: }, annotations: { readOnlyHint: true }, }, - { - name: 'sim_workflow', - agentId: 'workflow', - description: - 'Manage workflow-level configuration: environment variables, settings, scheduling, and deployment status. 
Use for any data about a specific workflow — its settings, credentials, variables, or deployment state.', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - workflowId: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - annotations: { destructiveHint: false }, - }, { name: 'sim_research', agentId: 'research', diff --git a/apps/sim/lib/core/utils/urls.ts b/apps/sim/lib/core/utils/urls.ts index 5be78eb1d7a..15712176a8d 100644 --- a/apps/sim/lib/core/utils/urls.ts +++ b/apps/sim/lib/core/utils/urls.ts @@ -1,6 +1,9 @@ import { getEnv } from '@/lib/core/config/env' import { isProd } from '@/lib/core/config/feature-flags' +/** Canonical base URL for the public-facing marketing site. No trailing slash. */ +export const SITE_URL = 'https://www.sim.ai' + function hasHttpProtocol(url: string): boolean { return /^https?:\/\//i.test(url) } diff --git a/apps/sim/lib/logs/execution/logging-session.ts b/apps/sim/lib/logs/execution/logging-session.ts index 3b0a20bd642..59a03a32a2c 100644 --- a/apps/sim/lib/logs/execution/logging-session.ts +++ b/apps/sim/lib/logs/execution/logging-session.ts @@ -555,7 +555,7 @@ export class LoggingSession { models: {}, } - const message = error?.message || 'Execution failed before starting blocks' + const message = error?.message || 'Run failed before starting blocks' const errorSpan: TraceSpan = { id: 'workflow-error-root', @@ -994,7 +994,7 @@ export class LoggingSession { traceSpans: params?.traceSpans, endedAt: params?.endedAt, totalDurationMs: params?.totalDurationMs, - errorMessage: 'Execution was cancelled', + errorMessage: 'Run was cancelled', isError: false, finalizationPath: 'cancelled', finalOutput: { cancelled: true }, @@ -1021,7 +1021,7 @@ export class LoggingSession { traceSpans: params?.traceSpans, endedAt: params?.endedAt, totalDurationMs: params?.totalDurationMs, - errorMessage: 'Execution paused but failed to store full trace spans', + errorMessage: 'Run paused 
but failed to store full trace spans', isError: false, finalizationPath: 'paused', finalOutput: { paused: true }, @@ -1041,7 +1041,7 @@ export class LoggingSession { requestId?: string ): Promise { try { - const message = errorMessage || 'Execution failed' + const message = errorMessage || 'Run failed' await db .update(workflowExecutionLogs) .set({ diff --git a/apps/sim/lib/logs/search-suggestions.ts b/apps/sim/lib/logs/search-suggestions.ts index 2892baf7aaa..859d40ddf16 100644 --- a/apps/sim/lib/logs/search-suggestions.ts +++ b/apps/sim/lib/logs/search-suggestions.ts @@ -57,25 +57,25 @@ export const FILTER_DEFINITIONS: FilterDefinition[] = [ { key: 'cost', label: 'Cost', - description: 'Filter by execution cost', + description: 'Filter by run cost', options: [ { value: '>0.01', label: 'Over 2 credits', - description: 'Executions costing more than 2 credits', + description: 'Runs costing more than 2 credits', }, { value: '<0.005', label: 'Under 1 credit', - description: 'Executions costing less than 1 credit', + description: 'Runs costing less than 1 credit', }, { value: '>0.05', label: 'Over 10 credits', - description: 'Executions costing more than 10 credits', + description: 'Runs costing more than 10 credits', }, - { value: '=0', label: 'Free', description: 'Free executions' }, - { value: '>0', label: 'Paid', description: 'Executions with cost' }, + { value: '=0', label: 'Free', description: 'Free runs' }, + { value: '>0', label: 'Paid', description: 'Runs with cost' }, ], }, { @@ -104,13 +104,13 @@ export const FILTER_DEFINITIONS: FilterDefinition[] = [ { key: 'duration', label: 'Duration', - description: 'Filter by execution duration', + description: 'Filter by run duration', options: [ - { value: '>5s', label: 'Over 5s', description: 'Executions longer than 5 seconds' }, - { value: '<1s', label: 'Under 1s', description: 'Executions shorter than 1 second' }, - { value: '>10s', label: 'Over 10s', description: 'Executions longer than 10 seconds' }, - { value: 
'>30s', label: 'Over 30s', description: 'Executions longer than 30 seconds' }, - { value: '<500ms', label: 'Under 0.5s', description: 'Very fast executions' }, + { value: '>5s', label: 'Over 5s', description: 'Runs longer than 5 seconds' }, + { value: '<1s', label: 'Under 1s', description: 'Runs shorter than 1 second' }, + { value: '>10s', label: 'Over 10s', description: 'Runs longer than 10 seconds' }, + { value: '>30s', label: 'Over 30s', description: 'Runs longer than 30 seconds' }, + { value: '<500ms', label: 'Under 0.5s', description: 'Very fast runs' }, ], }, ] @@ -225,8 +225,8 @@ export class SearchSuggestions { suggestions.push({ id: 'filter-key-executionId', value: 'executionId:', - label: 'Execution ID', - description: 'Filter by execution ID', + label: 'Run ID', + description: 'Filter by run ID', category: 'filters', }) @@ -283,7 +283,7 @@ export class SearchSuggestions { id: `filter-value-trigger-${t.value}`, value: `trigger:${t.value}`, label: t.label, - description: `${t.label}-triggered executions`, + description: `${t.label}-triggered runs`, category: 'trigger' as const, color: t.color, })) @@ -604,7 +604,7 @@ export class SearchSuggestions { id: `trigger-match-${trigger.value}`, value: `trigger:${trigger.value}`, label: trigger.label, - description: `${trigger.label}-triggered executions`, + description: `${trigger.label}-triggered runs`, category: 'trigger' as const, color: trigger.color, })) diff --git a/apps/sim/lib/workflows/subblocks/context.ts b/apps/sim/lib/workflows/subblocks/context.ts index 6f41759cffa..eca39260ecc 100644 --- a/apps/sim/lib/workflows/subblocks/context.ts +++ b/apps/sim/lib/workflows/subblocks/context.ts @@ -17,6 +17,7 @@ export const SELECTOR_CONTEXT_FIELDS = new Set([ 'siteId', 'collectionId', 'spreadsheetId', + 'driveId', 'fileId', 'baseId', 'datasetId', diff --git a/apps/sim/next.config.ts b/apps/sim/next.config.ts index 9cd085983f0..bf7e51ce5d4 100644 --- a/apps/sim/next.config.ts +++ b/apps/sim/next.config.ts @@ 
-149,6 +149,15 @@ const nextConfig: NextConfig = { ], async headers() { return [ + { + source: '/:all*(svg|jpg|jpeg|png|gif|ico|webp|avif|woff|woff2|ttf|eot)', + headers: [ + { + key: 'Cache-Control', + value: 'public, max-age=86400, stale-while-revalidate=604800', + }, + ], + }, { source: '/.well-known/:path*', headers: [ @@ -386,12 +395,12 @@ const nextConfig: NextConfig = { redirects.push( { source: '/building/:path*', - destination: 'https://sim.ai/blog/:path*', + destination: 'https://www.sim.ai/blog/:path*', permanent: true, }, { source: '/studio/:path*', - destination: 'https://sim.ai/blog/:path*', + destination: 'https://www.sim.ai/blog/:path*', permanent: true, } ) diff --git a/apps/sim/public/blog/executor/cover.png b/apps/sim/public/blog/executor/cover.png index 5f9031fcbf7..0dfda81e3df 100644 Binary files a/apps/sim/public/blog/executor/cover.png and b/apps/sim/public/blog/executor/cover.png differ diff --git a/apps/sim/public/blog/mothership/cover.jpg b/apps/sim/public/blog/mothership/cover.jpg new file mode 100644 index 00000000000..64d8c63dc29 Binary files /dev/null and b/apps/sim/public/blog/mothership/cover.jpg differ diff --git a/apps/sim/public/blog/mothership/cover.png b/apps/sim/public/blog/mothership/cover.png deleted file mode 100644 index c023f635727..00000000000 Binary files a/apps/sim/public/blog/mothership/cover.png and /dev/null differ diff --git a/apps/sim/public/blog/openai-vs-n8n-vs-sim/workflow.png b/apps/sim/public/blog/openai-vs-n8n-vs-sim/workflow.png index e5ba786385a..c37b9a5d494 100644 Binary files a/apps/sim/public/blog/openai-vs-n8n-vs-sim/workflow.png and b/apps/sim/public/blog/openai-vs-n8n-vs-sim/workflow.png differ diff --git a/apps/sim/public/blog/series-a/cover.png b/apps/sim/public/blog/series-a/cover.png index 71aeb92ce84..0a9ce8b8e43 100644 Binary files a/apps/sim/public/blog/series-a/cover.png and b/apps/sim/public/blog/series-a/cover.png differ diff --git a/apps/sim/public/blog/v0-5/cover.png 
b/apps/sim/public/blog/v0-5/cover.png index 6ccf4ba8e36..fa6f3729cd3 100644 Binary files a/apps/sim/public/blog/v0-5/cover.png and b/apps/sim/public/blog/v0-5/cover.png differ diff --git a/apps/sim/public/landing/multiplayer-cover.png b/apps/sim/public/landing/multiplayer-cover.png index ae54fbac62e..76f6eaf16e7 100644 Binary files a/apps/sim/public/landing/multiplayer-cover.png and b/apps/sim/public/landing/multiplayer-cover.png differ diff --git a/apps/sim/tools/microsoft_excel/read.ts b/apps/sim/tools/microsoft_excel/read.ts index b1ffe5f3207..22a3aea0a39 100644 --- a/apps/sim/tools/microsoft_excel/read.ts +++ b/apps/sim/tools/microsoft_excel/read.ts @@ -6,6 +6,7 @@ import type { MicrosoftExcelV2ToolParams, } from '@/tools/microsoft_excel/types' import { + getItemBasePath, getSpreadsheetWebUrl, trimTrailingEmptyRowsAndColumns, } from '@/tools/microsoft_excel/utils' @@ -35,6 +36,13 @@ export const readTool: ToolConfig { @@ -91,6 +98,9 @@ export const readTool: ToolConfig { + const spreadsheetId = params?.spreadsheetId?.trim() || '' + const driveId = params?.driveId + // If we came from the worksheets listing (no range provided), resolve first sheet name then fetch range if (response.url.includes('/workbook/worksheets?')) { const listData = await response.json() @@ -100,23 +110,19 @@ export const readTool: ToolConfig { @@ -294,20 +289,19 @@ export const readV2Tool: ToolConfig { const data = await response.json() - const urlParts = response.url.split('/drive/items/') - const spreadsheetId = urlParts[1]?.split('/')[0] || '' + const spreadsheetId = params?.spreadsheetId?.trim() || '' + const driveId = params?.driveId const accessToken = params?.accessToken if (!accessToken) { throw new Error('Access token is required') } - const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken) + const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken, driveId) const address: string = data.address || data.addressLocal || '' const rawValues: 
ExcelCellValue[][] = data.values || [] const values = trimTrailingEmptyRowsAndColumns(rawValues) - // Extract sheet name from address (format: SheetName!A1:B2) const sheetName = params?.sheetName || address.split('!')[0] || '' return { diff --git a/apps/sim/tools/microsoft_excel/table_add.ts b/apps/sim/tools/microsoft_excel/table_add.ts index c84047e1146..eeca045617a 100644 --- a/apps/sim/tools/microsoft_excel/table_add.ts +++ b/apps/sim/tools/microsoft_excel/table_add.ts @@ -2,7 +2,7 @@ import type { MicrosoftExcelTableAddResponse, MicrosoftExcelTableToolParams, } from '@/tools/microsoft_excel/types' -import { getSpreadsheetWebUrl } from '@/tools/microsoft_excel/utils' +import { getItemBasePath, getSpreadsheetWebUrl } from '@/tools/microsoft_excel/utils' import type { ToolConfig } from '@/tools/types' export const tableAddTool: ToolConfig< @@ -33,6 +33,13 @@ export const tableAddTool: ToolConfig< description: 'The ID of the spreadsheet/workbook containing the table (e.g., "01ABC123DEF456")', }, + driveId: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: + 'The ID of the drive containing the spreadsheet. Required for SharePoint files. 
If omitted, uses personal OneDrive.', + }, tableName: { type: 'string', required: true, @@ -51,7 +58,8 @@ export const tableAddTool: ToolConfig< request: { url: (params) => { const tableName = encodeURIComponent(params.tableName) - return `https://graph.microsoft.com/v1.0/me/drive/items/${params.spreadsheetId}/workbook/tables('${tableName}')/rows/add` + const basePath = getItemBasePath(params.spreadsheetId, params.driveId) + return `${basePath}/workbook/tables('${tableName}')/rows/add` }, method: 'POST', headers: (params) => ({ @@ -106,34 +114,26 @@ export const tableAddTool: ToolConfig< transformResponse: async (response: Response, params?: MicrosoftExcelTableToolParams) => { const data = await response.json() - const urlParts = response.url.split('/drive/items/') - const spreadsheetId = urlParts[1]?.split('/')[0] || '' + const spreadsheetId = params?.spreadsheetId?.trim() || '' + const driveId = params?.driveId - // Fetch the browser-accessible web URL const accessToken = params?.accessToken if (!accessToken) { throw new Error('Access token is required') } - const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken) + const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken, driveId) - const metadata = { - spreadsheetId, - spreadsheetUrl: webUrl, - } - - const result = { + return { success: true, output: { index: data.index || 0, values: data.values || [], metadata: { - spreadsheetId: metadata.spreadsheetId, - spreadsheetUrl: metadata.spreadsheetUrl, + spreadsheetId, + spreadsheetUrl: webUrl, }, }, } - - return result }, outputs: { diff --git a/apps/sim/tools/microsoft_excel/types.ts b/apps/sim/tools/microsoft_excel/types.ts index 1a05fafbcec..2032ce34cb5 100644 --- a/apps/sim/tools/microsoft_excel/types.ts +++ b/apps/sim/tools/microsoft_excel/types.ts @@ -63,6 +63,7 @@ export interface MicrosoftExcelWorksheetAddResponse extends ToolResponse { export interface MicrosoftExcelToolParams { accessToken: string spreadsheetId: string + driveId?: 
string range?: string values?: ExcelCellValue[][] valueInputOption?: 'RAW' | 'USER_ENTERED' @@ -75,6 +76,7 @@ export interface MicrosoftExcelToolParams { export interface MicrosoftExcelTableToolParams { accessToken: string spreadsheetId: string + driveId?: string tableName: string values: ExcelCellValue[][] rowIndex?: number @@ -83,6 +85,7 @@ export interface MicrosoftExcelTableToolParams { export interface MicrosoftExcelWorksheetToolParams { accessToken: string spreadsheetId: string + driveId?: string worksheetName: string } @@ -96,6 +99,7 @@ export type MicrosoftExcelResponse = export interface MicrosoftExcelV2ToolParams { accessToken: string spreadsheetId: string + driveId?: string sheetName: string cellRange?: string values?: ExcelCellValue[][] diff --git a/apps/sim/tools/microsoft_excel/utils.ts b/apps/sim/tools/microsoft_excel/utils.ts index dc10e74629d..80f27e93c48 100644 --- a/apps/sim/tools/microsoft_excel/utils.ts +++ b/apps/sim/tools/microsoft_excel/utils.ts @@ -1,8 +1,39 @@ import { createLogger } from '@sim/logger' +import { validatePathSegment } from '@/lib/core/security/input-validation' import type { ExcelCellValue } from '@/tools/microsoft_excel/types' const logger = createLogger('MicrosoftExcelUtils') +/** Pattern for Microsoft Graph item/drive IDs: alphanumeric, hyphens, underscores, and ! (for SharePoint b! format) */ +export const GRAPH_ID_PATTERN = /^[a-zA-Z0-9!_-]+$/ + +/** + * Returns the Graph API base path for an Excel item. + * When driveId is provided, uses /drives/{driveId}/items/{itemId} (SharePoint/shared drives). + * When driveId is omitted, uses /me/drive/items/{itemId} (personal OneDrive). 
+ */ +export function getItemBasePath(spreadsheetId: string, driveId?: string): string { + const spreadsheetValidation = validatePathSegment(spreadsheetId, { + paramName: 'spreadsheetId', + customPattern: GRAPH_ID_PATTERN, + }) + if (!spreadsheetValidation.isValid) { + throw new Error(spreadsheetValidation.error) + } + + if (driveId) { + const driveValidation = validatePathSegment(driveId, { + paramName: 'driveId', + customPattern: GRAPH_ID_PATTERN, + }) + if (!driveValidation.isValid) { + throw new Error(driveValidation.error) + } + return `https://graph.microsoft.com/v1.0/drives/${driveId}/items/${spreadsheetId}` + } + return `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}` +} + export function trimTrailingEmptyRowsAndColumns(matrix: ExcelCellValue[][]): ExcelCellValue[][] { if (!Array.isArray(matrix) || matrix.length === 0) return [] @@ -43,33 +74,32 @@ export function trimTrailingEmptyRowsAndColumns(matrix: ExcelCellValue[][]): Exc */ export async function getSpreadsheetWebUrl( spreadsheetId: string, - accessToken: string + accessToken: string, + driveId?: string ): Promise { + const basePath = getItemBasePath(spreadsheetId, driveId) try { - const response = await fetch( - `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}?$select=id,webUrl`, - { - headers: { - Authorization: `Bearer ${accessToken}`, - }, - } - ) + const response = await fetch(`${basePath}?$select=id,webUrl`, { + headers: { + Authorization: `Bearer ${accessToken}`, + }, + }) if (!response.ok) { logger.warn('Failed to fetch spreadsheet webUrl, using Graph API URL as fallback', { spreadsheetId, status: response.status, }) - return `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}` + return basePath } const data = await response.json() - return data.webUrl || `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}` + return data.webUrl || basePath } catch (error) { logger.warn('Error fetching spreadsheet webUrl, using Graph API URL as 
fallback', { spreadsheetId, error, }) - return `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}` + return basePath } } diff --git a/apps/sim/tools/microsoft_excel/worksheet_add.ts b/apps/sim/tools/microsoft_excel/worksheet_add.ts index 1350bc55532..6cfc91b47db 100644 --- a/apps/sim/tools/microsoft_excel/worksheet_add.ts +++ b/apps/sim/tools/microsoft_excel/worksheet_add.ts @@ -2,7 +2,7 @@ import type { MicrosoftExcelWorksheetAddResponse, MicrosoftExcelWorksheetToolParams, } from '@/tools/microsoft_excel/types' -import { getSpreadsheetWebUrl } from '@/tools/microsoft_excel/utils' +import { getItemBasePath, getSpreadsheetWebUrl } from '@/tools/microsoft_excel/utils' import type { ToolConfig } from '@/tools/types' /** @@ -36,6 +36,13 @@ export const worksheetAddTool: ToolConfig< visibility: 'user-or-llm', description: 'The ID of the Excel workbook to add the worksheet to (e.g., "01ABC123DEF456")', }, + driveId: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: + 'The ID of the drive containing the spreadsheet. Required for SharePoint files. 
If omitted, uses personal OneDrive.', + }, worksheetName: { type: 'string', required: true, @@ -51,7 +58,8 @@ export const worksheetAddTool: ToolConfig< if (!spreadsheetId) { throw new Error('Spreadsheet ID is required') } - return `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}/workbook/worksheets/add` + const basePath = getItemBasePath(spreadsheetId, params.driveId) + return `${basePath}/workbook/worksheets/add` }, method: 'POST', headers: (params) => { @@ -106,15 +114,14 @@ export const worksheetAddTool: ToolConfig< const data = await response.json() - const urlParts = response.url.split('/drive/items/') - const spreadsheetId = urlParts[1]?.split('/')[0] || '' + const spreadsheetId = params?.spreadsheetId?.trim() || '' + const driveId = params?.driveId - // Fetch the browser-accessible web URL const accessToken = params?.accessToken if (!accessToken) { throw new Error('Access token is required') } - const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken) + const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken, driveId) const result: MicrosoftExcelWorksheetAddResponse = { success: true, diff --git a/apps/sim/tools/microsoft_excel/write.ts b/apps/sim/tools/microsoft_excel/write.ts index 335d59acc6c..1e5fa7b3bde 100644 --- a/apps/sim/tools/microsoft_excel/write.ts +++ b/apps/sim/tools/microsoft_excel/write.ts @@ -4,7 +4,7 @@ import type { MicrosoftExcelV2WriteResponse, MicrosoftExcelWriteResponse, } from '@/tools/microsoft_excel/types' -import { getSpreadsheetWebUrl } from '@/tools/microsoft_excel/utils' +import { getItemBasePath, getSpreadsheetWebUrl } from '@/tools/microsoft_excel/utils' import type { ToolConfig } from '@/tools/types' export const writeTool: ToolConfig = { @@ -31,6 +31,13 @@ export const writeTool: ToolConfig { const data = await response.json() - const urlParts = response.url.split('/drive/items/') - const spreadsheetId = urlParts[1]?.split('/')[0] || '' + const spreadsheetId = 
params?.spreadsheetId?.trim() || '' + const driveId = params?.driveId - // Fetch the browser-accessible web URL const accessToken = params?.accessToken if (!accessToken) { throw new Error('Access token is required') } - const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken) - - const metadata = { - spreadsheetId, - properties: {}, - spreadsheetUrl: webUrl, - } + const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken, driveId) - const result = { + return { success: true, output: { updatedRange: data.updatedRange, @@ -161,13 +162,11 @@ export const writeTool: ToolConfig { const data = await response.json() - const urlParts = response.url.split('/drive/items/') - const spreadsheetId = urlParts[1]?.split('/')[0] || '' + const spreadsheetId = params?.spreadsheetId?.trim() || '' + const driveId = params?.driveId const accessToken = params?.accessToken if (!accessToken) { throw new Error('Access token is required') } - const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken) + const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken, driveId) return { success: true,