diff --git a/apps/docs/content/docs/en/blocks/response.mdx b/apps/docs/content/docs/en/blocks/response.mdx
index 327bbaa6eb0..6ec1872fe14 100644
--- a/apps/docs/content/docs/en/blocks/response.mdx
+++ b/apps/docs/content/docs/en/blocks/response.mdx
@@ -20,7 +20,7 @@ The Response block formats and sends structured HTTP responses back to API calle
- Response blocks are terminal blocks - they end workflow execution and cannot connect to other blocks.
+ Response blocks are exit points — when a Response block executes, it ends the workflow and sends the HTTP response immediately. Multiple Response blocks can be placed on different branches (e.g. after a Router or Condition), but only the first one to execute determines the API response.
## Configuration Options
@@ -77,7 +77,11 @@ Condition (Error Detected) → Router → Response (400/500, Error Details)
## Outputs
-Response blocks are terminal — no downstream blocks execute after them. However, the block does define outputs (`data`, `status`, `headers`) which are used to construct the HTTP response sent back to the API caller.
+Response blocks are exit points — when one executes, no further blocks run. The block defines outputs (`data`, `status`, `headers`) which are used to construct the HTTP response sent back to the API caller.
+
+<Callout type="warning">
+  If a Response block is placed on a parallel branch, there are no guarantees about whether other parallel blocks will run. Execution order across parallel branches is non-deterministic, so a parallel block may execute before or after the Response block on any given run. Avoid placing Response blocks in parallel with blocks that have important side effects.
+</Callout>
## Variable References
@@ -110,10 +114,10 @@ Use the `` syntax to dynamically insert workflow variables into y
- **Validate variable references**: Ensure all referenced variables exist and contain the expected data types before the Response block executes
diff --git a/apps/docs/content/docs/en/execution/basics.mdx b/apps/docs/content/docs/en/execution/basics.mdx
index 1541831e770..1777b7fdcfb 100644
--- a/apps/docs/content/docs/en/execution/basics.mdx
+++ b/apps/docs/content/docs/en/execution/basics.mdx
@@ -96,8 +96,9 @@ Understanding these core principles will help you build better workflows:
2. **Automatic Parallelization**: Independent blocks run concurrently without configuration
3. **Smart Data Flow**: Outputs flow automatically to connected blocks
4. **Error Handling**: Failed blocks stop their execution path but don't affect independent paths
-5. **State Persistence**: All block outputs and execution details are preserved for debugging
-6. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. If the chain exceeds 25 hops, execution is stopped to prevent infinite loops
+5. **Response Blocks as Exit Points**: When a Response block executes, the entire workflow stops and the API response is sent immediately. Multiple Response blocks can exist on different branches — the first one to execute wins
+6. **State Persistence**: All block outputs and execution details are preserved for debugging
+7. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. If the chain exceeds 25 hops, execution is stopped to prevent infinite loops
## Next Steps
diff --git a/apps/sim/blocks/blocks/response.ts b/apps/sim/blocks/blocks/response.ts
index 82f5ddb58bb..d11f6814850 100644
--- a/apps/sim/blocks/blocks/response.ts
+++ b/apps/sim/blocks/blocks/response.ts
@@ -12,12 +12,13 @@ export const ResponseBlock: BlockConfig = {
bestPractices: `
- Only use this if the trigger block is the API Trigger.
- Prefer the builder mode over the editor mode.
- - This is usually used as the last block in the workflow.
+ - The Response block is an exit point. When it executes, the workflow stops and the API response is sent immediately.
+ - Multiple Response blocks can be placed on different branches (e.g. after a Router or Condition). The first one to execute determines the API response and ends the workflow.
+ - If a Response block is on a parallel branch, there are no guarantees about whether other parallel blocks will run. Avoid placing Response blocks in parallel with blocks that have important side effects.
`,
category: 'blocks',
bgColor: '#2F55FF',
icon: ResponseIcon,
- singleInstance: true,
subBlocks: [
{
id: 'dataMode',
diff --git a/apps/sim/executor/execution/engine.test.ts b/apps/sim/executor/execution/engine.test.ts
index ce62d78e33d..f9c0ab41209 100644
--- a/apps/sim/executor/execution/engine.test.ts
+++ b/apps/sim/executor/execution/engine.test.ts
@@ -957,6 +957,297 @@ describe('ExecutionEngine', () => {
})
})
+ describe('Response block exit-point behavior', () => {
+ it('should lock finalOutput and stop execution when a terminal Response block fires', async () => {
+ const startNode = createMockNode('start', 'starter')
+ const responseNode = createMockNode('response', 'response')
+
+ startNode.outgoingEdges.set('edge1', { target: 'response' })
+
+ const dag = createMockDAG([startNode, responseNode])
+ const context = createMockContext()
+ const edgeManager = createMockEdgeManager((node) => {
+ if (node.id === 'start') return ['response']
+ return []
+ })
+
+ const nodeOrchestrator = {
+ executionCount: 0,
+ executeNode: vi.fn().mockImplementation(async (_ctx: ExecutionContext, nodeId: string) => {
+ nodeOrchestrator.executionCount++
+ if (nodeId === 'response') {
+ return {
+ nodeId,
+ output: { data: { message: 'ok' }, status: 200, headers: {} },
+ isFinalOutput: true,
+ }
+ }
+ return { nodeId, output: {}, isFinalOutput: false }
+ }),
+ handleNodeCompletion: vi.fn(),
+ } as unknown as MockNodeOrchestrator
+
+ const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)
+ const result = await engine.run('start')
+
+ expect(result.success).toBe(true)
+ expect(result.output).toEqual({ data: { message: 'ok' }, status: 200, headers: {} })
+ expect(nodeOrchestrator.executionCount).toBe(2)
+ })
+
+ it('should stop execution after Response block on a branch (Router)', async () => {
+ const startNode = createMockNode('start', 'starter')
+ const routerNode = createMockNode('router', 'router')
+ const successResponse = createMockNode('success-response', 'response')
+ const errorResponse = createMockNode('error-response', 'response')
+
+ startNode.outgoingEdges.set('edge1', { target: 'router' })
+ routerNode.outgoingEdges.set('success', { target: 'success-response' })
+ routerNode.outgoingEdges.set('error', { target: 'error-response' })
+
+ const dag = createMockDAG([startNode, routerNode, successResponse, errorResponse])
+ const context = createMockContext()
+ const edgeManager = createMockEdgeManager((node) => {
+ if (node.id === 'start') return ['router']
+ if (node.id === 'router') return ['success-response']
+ return []
+ })
+
+ const executedNodes: string[] = []
+ const nodeOrchestrator = {
+ executionCount: 0,
+ executeNode: vi.fn().mockImplementation(async (_ctx: ExecutionContext, nodeId: string) => {
+ executedNodes.push(nodeId)
+ nodeOrchestrator.executionCount++
+ if (nodeId === 'success-response') {
+ return {
+ nodeId,
+ output: { data: { result: 'success' }, status: 200, headers: {} },
+ isFinalOutput: true,
+ }
+ }
+ return { nodeId, output: {}, isFinalOutput: false }
+ }),
+ handleNodeCompletion: vi.fn(),
+ } as unknown as MockNodeOrchestrator
+
+ const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)
+ const result = await engine.run('start')
+
+ expect(result.success).toBe(true)
+ expect(result.output).toEqual({ data: { result: 'success' }, status: 200, headers: {} })
+ expect(executedNodes).not.toContain('error-response')
+ })
+
+ it('should stop all branches when a parallel Response block fires first', async () => {
+ const startNode = createMockNode('start', 'starter')
+ const responseNode = createMockNode('fast-response', 'response')
+ const slowNode = createMockNode('slow-work', 'function')
+ const afterSlowNode = createMockNode('after-slow', 'function')
+
+ startNode.outgoingEdges.set('edge1', { target: 'fast-response' })
+ startNode.outgoingEdges.set('edge2', { target: 'slow-work' })
+ slowNode.outgoingEdges.set('edge3', { target: 'after-slow' })
+
+ const dag = createMockDAG([startNode, responseNode, slowNode, afterSlowNode])
+ const context = createMockContext()
+ const edgeManager = createMockEdgeManager((node) => {
+ if (node.id === 'start') return ['fast-response', 'slow-work']
+ if (node.id === 'slow-work') return ['after-slow']
+ return []
+ })
+
+ const executedNodes: string[] = []
+ const nodeOrchestrator = {
+ executionCount: 0,
+ executeNode: vi.fn().mockImplementation(async (_ctx: ExecutionContext, nodeId: string) => {
+ executedNodes.push(nodeId)
+ nodeOrchestrator.executionCount++
+ if (nodeId === 'fast-response') {
+ return {
+ nodeId,
+ output: { data: { fast: true }, status: 200, headers: {} },
+ isFinalOutput: true,
+ }
+ }
+ if (nodeId === 'slow-work') {
+ await new Promise((resolve) => setTimeout(resolve, 1))
+ return { nodeId, output: { slow: true }, isFinalOutput: false }
+ }
+ return { nodeId, output: {}, isFinalOutput: true }
+ }),
+ handleNodeCompletion: vi.fn(),
+ } as unknown as MockNodeOrchestrator
+
+ const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)
+ const result = await engine.run('start')
+
+ expect(result.success).toBe(true)
+ expect(result.output).toEqual({ data: { fast: true }, status: 200, headers: {} })
+ expect(executedNodes).not.toContain('after-slow')
+ })
+
+ it('should use standard finalOutput logic when no Response block exists', async () => {
+ const startNode = createMockNode('start', 'starter')
+ const endNode = createMockNode('end', 'function')
+ startNode.outgoingEdges.set('edge1', { target: 'end' })
+
+ const dag = createMockDAG([startNode, endNode])
+ const context = createMockContext()
+ const edgeManager = createMockEdgeManager((node) => {
+ if (node.id === 'start') return ['end']
+ return []
+ })
+
+ const nodeOrchestrator = {
+ executionCount: 0,
+ executeNode: vi.fn().mockImplementation(async (_ctx: ExecutionContext, nodeId: string) => {
+ nodeOrchestrator.executionCount++
+ if (nodeId === 'end') {
+ return { nodeId, output: { result: 'done' }, isFinalOutput: true }
+ }
+ return { nodeId, output: {}, isFinalOutput: false }
+ }),
+ handleNodeCompletion: vi.fn(),
+ } as unknown as MockNodeOrchestrator
+
+ const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)
+ const result = await engine.run('start')
+
+ expect(result.success).toBe(true)
+ expect(result.output).toEqual({ result: 'done' })
+ })
+
+ it('should not let a second Response block overwrite the first', async () => {
+ const startNode = createMockNode('start', 'starter')
+ const response1 = createMockNode('response1', 'response')
+ const response2 = createMockNode('response2', 'response')
+
+ startNode.outgoingEdges.set('edge1', { target: 'response1' })
+ startNode.outgoingEdges.set('edge2', { target: 'response2' })
+
+ const dag = createMockDAG([startNode, response1, response2])
+ const context = createMockContext()
+ const edgeManager = createMockEdgeManager((node) => {
+ if (node.id === 'start') return ['response1', 'response2']
+ return []
+ })
+
+ const nodeOrchestrator = {
+ executionCount: 0,
+ executeNode: vi.fn().mockImplementation(async (_ctx: ExecutionContext, nodeId: string) => {
+ nodeOrchestrator.executionCount++
+ if (nodeId === 'response1') {
+ return {
+ nodeId,
+ output: { data: { first: true }, status: 200, headers: {} },
+ isFinalOutput: true,
+ }
+ }
+ if (nodeId === 'response2') {
+ return {
+ nodeId,
+ output: { data: { second: true }, status: 201, headers: {} },
+ isFinalOutput: true,
+ }
+ }
+ return { nodeId, output: {}, isFinalOutput: false }
+ }),
+ handleNodeCompletion: vi.fn(),
+ } as unknown as MockNodeOrchestrator
+
+ const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)
+ const result = await engine.run('start')
+
+ expect(result.success).toBe(true)
+ expect(result.output).toEqual({ data: { first: true }, status: 200, headers: {} })
+ })
+
+ it('should not let non-Response terminals overwrite a Response block output', async () => {
+ const startNode = createMockNode('start', 'starter')
+ const responseNode = createMockNode('response', 'response')
+ const otherTerminal = createMockNode('other', 'function')
+
+ startNode.outgoingEdges.set('edge1', { target: 'response' })
+ startNode.outgoingEdges.set('edge2', { target: 'other' })
+
+ const dag = createMockDAG([startNode, responseNode, otherTerminal])
+ const context = createMockContext()
+ const edgeManager = createMockEdgeManager((node) => {
+ if (node.id === 'start') return ['response', 'other']
+ return []
+ })
+
+ const nodeOrchestrator = {
+ executionCount: 0,
+ executeNode: vi.fn().mockImplementation(async (_ctx: ExecutionContext, nodeId: string) => {
+ nodeOrchestrator.executionCount++
+ if (nodeId === 'response') {
+ return {
+ nodeId,
+ output: { data: { response: true }, status: 200, headers: {} },
+ isFinalOutput: true,
+ }
+ }
+ if (nodeId === 'other') {
+ await new Promise((resolve) => setTimeout(resolve, 1))
+ return { nodeId, output: { other: true }, isFinalOutput: true }
+ }
+ return { nodeId, output: {}, isFinalOutput: false }
+ }),
+ handleNodeCompletion: vi.fn(),
+ } as unknown as MockNodeOrchestrator
+
+ const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)
+ const result = await engine.run('start')
+
+ expect(result.success).toBe(true)
+ expect(result.output).toEqual({ data: { response: true }, status: 200, headers: {} })
+ })
+
+ it('should honor locked Response output even when a parallel node throws an error', async () => {
+ const startNode = createMockNode('start', 'starter')
+ const responseNode = createMockNode('response', 'response')
+ const errorNode = createMockNode('error-node', 'function')
+
+ startNode.outgoingEdges.set('edge1', { target: 'response' })
+ startNode.outgoingEdges.set('edge2', { target: 'error-node' })
+
+ const dag = createMockDAG([startNode, responseNode, errorNode])
+ const context = createMockContext()
+ const edgeManager = createMockEdgeManager((node) => {
+ if (node.id === 'start') return ['response', 'error-node']
+ return []
+ })
+
+ const nodeOrchestrator = {
+ executionCount: 0,
+ executeNode: vi.fn().mockImplementation(async (_ctx: ExecutionContext, nodeId: string) => {
+ nodeOrchestrator.executionCount++
+ if (nodeId === 'response') {
+ return {
+ nodeId,
+ output: { data: { ok: true }, status: 200, headers: {} },
+ isFinalOutput: true,
+ }
+ }
+ if (nodeId === 'error-node') {
+ await new Promise((resolve) => setTimeout(resolve, 1))
+ throw new Error('Parallel branch failed')
+ }
+ return { nodeId, output: {}, isFinalOutput: false }
+ }),
+ handleNodeCompletion: vi.fn(),
+ } as unknown as MockNodeOrchestrator
+
+ const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)
+ const result = await engine.run('start')
+
+ expect(result.success).toBe(true)
+ expect(result.output).toEqual({ data: { ok: true }, status: 200, headers: {} })
+ })
+ })
+
describe('Cancellation flag behavior', () => {
it('should set cancelledFlag when abort signal fires', async () => {
const abortController = new AbortController()
diff --git a/apps/sim/executor/execution/engine.ts b/apps/sim/executor/execution/engine.ts
index a420c5df7dd..756ab0a03b3 100644
--- a/apps/sim/executor/execution/engine.ts
+++ b/apps/sim/executor/execution/engine.ts
@@ -23,6 +23,7 @@ export class ExecutionEngine {
private executing = new Set>()
private queueLock = Promise.resolve()
private finalOutput: NormalizedBlockOutput = {}
+ private responseOutputLocked = false
private pausedBlocks: Map = new Map()
private allowResumeTriggers: boolean
private cancelledFlag = false
@@ -127,8 +128,7 @@ export class ExecutionEngine {
await this.waitForAllExecutions()
}
- // Rethrow the captured error so it's handled by the catch block
- if (this.errorFlag && this.executionError) {
+ if (this.errorFlag && this.executionError && !this.responseOutputLocked) {
throw this.executionError
}
@@ -399,6 +399,12 @@ export class ExecutionEngine {
return
}
+ if (this.stoppedEarlyFlag && this.responseOutputLocked) {
+ // Workflow already ended via Response block. Skip state persistence (setBlockOutput),
+ // parallel/loop scope tracking, and edge propagation — no downstream blocks will run.
+ return
+ }
+
if (output._pauseMetadata) {
const pauseMetadata = output._pauseMetadata
this.pausedBlocks.set(pauseMetadata.contextId, pauseMetadata)
@@ -410,7 +416,17 @@ export class ExecutionEngine {
await this.nodeOrchestrator.handleNodeCompletion(this.context, nodeId, output)
- if (isFinalOutput) {
+ const isResponseBlock = node.block.metadata?.id === BlockType.RESPONSE
+ if (isResponseBlock) {
+ if (!this.responseOutputLocked) {
+ this.finalOutput = output
+ this.responseOutputLocked = true
+ }
+ this.stoppedEarlyFlag = true
+ return
+ }
+
+ if (isFinalOutput && !this.responseOutputLocked) {
this.finalOutput = output
}