Skip to content

Commit 5d4f070

Browse files
authored
fix(lightspeed): update how tool call info is fetched after the query response update (#2607)
1 parent 4e40bf1 commit 5d4f070

8 files changed

Lines changed: 444 additions & 38 deletions

File tree

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'@red-hat-developer-hub/backstage-plugin-lightspeed': patch
3+
---
4+
5+
update how tool call info is retrieved

workspaces/lightspeed/plugins/lightspeed/src/components/ToolCallContent.tsx

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import { WrenchIcon } from '@patternfly/react-icons';
3030

3131
import { useTranslation } from '../hooks/useTranslation';
3232
import { ToolCall } from '../types';
33+
import { formatToolResponseForMarkdown } from '../utils/formatToolResponseForMarkdown';
3334

3435
interface ToolCallContentProps {
3536
toolCall: ToolCall;
@@ -55,7 +56,10 @@ export const ToolCallContent = ({
5556
const getParameterKeys = (): string[] => {
5657
if (!toolCall.arguments) return [];
5758
return Object.keys(toolCall.arguments).filter(
58-
key => toolCall.arguments[key] !== '' && toolCall.arguments[key] !== null,
59+
key =>
60+
key !== 'server_label' &&
61+
toolCall.arguments[key] !== '' &&
62+
toolCall.arguments[key] !== null,
5963
);
6064
};
6165

@@ -69,6 +73,14 @@ export const ToolCallContent = ({
6973
const thinkingTime = getThinkingTime();
7074
const parameterKeys = getParameterKeys();
7175

76+
const mcpServerTitle = (() => {
77+
const label = toolCall.arguments?.server_label;
78+
if (typeof label === 'string' && label.trim() !== '') {
79+
return label.trim();
80+
}
81+
return t('toolCall.mcpServer');
82+
})();
83+
7284
return (
7385
<Flex
7486
direction={{ default: 'column' }}
@@ -120,7 +132,7 @@ export const ToolCallContent = ({
120132
marginRight: '0.5em',
121133
}}
122134
/>
123-
<strong>{t('toolCall.mcpServer')}</strong>
135+
<strong>{mcpServerTitle}</strong>
124136
</Content>
125137
{toolCall.executionTime !== undefined && (
126138
<Content component={ContentVariants.small}>
@@ -215,12 +227,14 @@ export const ToolCallContent = ({
215227
>
216228
<FlexItem>
217229
<Message
218-
content={toolCall.response}
230+
content={formatToolResponseForMarkdown(
231+
toolCall.response,
232+
)}
219233
role={role}
220234
codeBlockProps={{
221235
isExpandable: true,
222236
expandableSectionProps: {
223-
truncateMaxLines: 1,
237+
truncateMaxLines: 12,
224238
},
225239
}}
226240
/>

workspaces/lightspeed/plugins/lightspeed/src/components/__tests__/ToolCallContent.test.tsx

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -179,4 +179,19 @@ describe('ToolCallContent', () => {
179179
const parentContent = mcpServerText.closest('p');
180180
expect(parentContent?.querySelector('svg')).toBeInTheDocument();
181181
});
182+
183+
test('should show server_label from arguments instead of default MCP Server label', () => {
184+
const toolCall: ToolCall = {
185+
...baseToolCall,
186+
arguments: {
187+
...baseToolCall.arguments,
188+
server_label: 'mcp::backstage',
189+
},
190+
};
191+
192+
render(<ToolCallContent toolCall={toolCall} />);
193+
194+
expect(screen.getByText('mcp::backstage')).toBeInTheDocument();
195+
expect(screen.queryByText('MCP Server')).not.toBeInTheDocument();
196+
});
182197
});

workspaces/lightspeed/plugins/lightspeed/src/hooks/__tests__/useConversationMessages.test.tsx

Lines changed: 137 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -560,6 +560,143 @@ data: {"event": "token", "data": {"id": 2, "token": ""}}\n
560560
expect(onComplete).toHaveBeenCalledWith('Hi from conversation 1!');
561561
});
562562

563+
it('should parse MCP-style tool_call and tool_result (name/args and content)', async () => {
564+
const toolCallId = 'mcp_list_67d3a067-e262-4bef-b467-6c82f622bd4a';
565+
const mcpStream = createSSEStream([
566+
{
567+
event: 'start',
568+
data: { conversation_id: 'conv-mcp' },
569+
},
570+
{
571+
event: 'tool_call',
572+
data: {
573+
id: toolCallId,
574+
name: 'mcp_list_tools',
575+
args: { server_label: 'mcp-integration-tools' },
576+
type: 'mcp_list_tools',
577+
},
578+
},
579+
{
580+
event: 'tool_result',
581+
data: {
582+
id: toolCallId,
583+
status: 'success',
584+
content: '{"server_label":"mcp-integration-tools","tools":[]}',
585+
},
586+
},
587+
{
588+
event: 'token',
589+
data: { id: 0, token: 'Done.' },
590+
},
591+
]);
592+
593+
const mockApi = {
594+
createMessage: jest.fn().mockResolvedValue({
595+
read: jest
596+
.fn()
597+
.mockResolvedValueOnce({
598+
done: false,
599+
value: new TextEncoder().encode(mcpStream),
600+
})
601+
.mockResolvedValueOnce({ done: true, value: null }),
602+
}),
603+
};
604+
(useApi as jest.Mock).mockReturnValue(mockApi);
605+
606+
const { result } = renderHook(
607+
() =>
608+
useConversationMessages(
609+
'conv-mcp',
610+
'test-user',
611+
'gpt-4',
612+
'openai',
613+
'user.png',
614+
),
615+
{ wrapper },
616+
);
617+
618+
await act(async () => {
619+
await result.current.handleInputPrompt('List MCP tools');
620+
});
621+
622+
await waitFor(() => {
623+
const msgs = result.current.conversations['conv-mcp'];
624+
const bot = msgs?.[1];
625+
expect(bot?.toolCalls).toHaveLength(1);
626+
expect(bot?.toolCalls?.[0]).toEqual(
627+
expect.objectContaining({
628+
id: toolCallId,
629+
toolName: 'mcp_list_tools',
630+
arguments: { server_label: 'mcp-integration-tools' },
631+
response: '{"server_label":"mcp-integration-tools","tools":[]}',
632+
isLoading: false,
633+
}),
634+
);
635+
expect(bot?.content).toContain('Done.');
636+
});
637+
});
638+
639+
it('should complete legacy tool_result when response field is omitted', async () => {
640+
const legacyStream = createSSEStream([
641+
{
642+
event: 'start',
643+
data: { conversation_id: 'conv-legacy-res' },
644+
},
645+
{
646+
event: 'tool_call',
647+
data: {
648+
id: 1,
649+
token: { tool_name: 'fetch-techdocs', arguments: { owner: 'a' } },
650+
},
651+
},
652+
{
653+
event: 'tool_result',
654+
data: { id: 1, token: { tool_name: 'fetch-techdocs' } },
655+
},
656+
]);
657+
658+
const mockApi = {
659+
createMessage: jest.fn().mockResolvedValue({
660+
read: jest
661+
.fn()
662+
.mockResolvedValueOnce({
663+
done: false,
664+
value: new TextEncoder().encode(legacyStream),
665+
})
666+
.mockResolvedValueOnce({ done: true, value: null }),
667+
}),
668+
};
669+
(useApi as jest.Mock).mockReturnValue(mockApi);
670+
671+
const { result } = renderHook(
672+
() =>
673+
useConversationMessages(
674+
'conv-legacy-res',
675+
'test-user',
676+
'gpt-4',
677+
'openai',
678+
'user.png',
679+
),
680+
{ wrapper },
681+
);
682+
683+
await act(async () => {
684+
await result.current.handleInputPrompt('techdocs');
685+
});
686+
687+
await waitFor(() => {
688+
const msgs = result.current.conversations['conv-legacy-res'];
689+
const bot = msgs?.[1];
690+
expect(bot?.toolCalls?.[0]).toEqual(
691+
expect.objectContaining({
692+
toolName: 'fetch-techdocs',
693+
response: '',
694+
isLoading: false,
695+
}),
696+
);
697+
});
698+
});
699+
563700
it('should resume streaming for the first conversation after switching back and complete', async () => {
564701
const onComplete = jest.fn();
565702

0 commit comments

Comments (0)