Skip to content

Commit b6d0646

Browse files
DevDesai444 authored and sanchitmonga22 committed
Use loaded LLM context length for Flutter validation
1 parent 89757fd commit b6d0646

1 file changed

Lines changed: 28 additions & 15 deletions

File tree

sdk/runanywhere-flutter/packages/runanywhere/lib/native/dart_bridge_llm.dart

Lines changed: 28 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ import 'dart:isolate'; // Keep for non-streaming generation
1919

2020
import 'package:ffi/ffi.dart';
2121
import 'package:runanywhere/features/llm/llm_configuration.dart';
22+
import 'package:runanywhere/foundation/error_types/sdk_error.dart';
2223
import 'package:runanywhere/foundation/logging/sdk_logger.dart';
2324
import 'package:runanywhere/native/ffi_types.dart';
2425
import 'package:runanywhere/native/platform_loader.dart';
@@ -245,18 +246,19 @@ class DartBridgeLLM {
245246
double temperature = 0.7,
246247
String? systemPrompt,
247248
}) async {
248-
_validateGenerationParameters(
249-
maxTokens: maxTokens,
250-
temperature: temperature,
251-
systemPrompt: systemPrompt,
252-
);
253-
254249
final handle = getHandle();
255250

256251
if (!isLoaded) {
257252
throw StateError('No LLM model loaded. Call loadModel() first.');
258253
}
259254

255+
_validateGenerationParameters(
256+
contextLength: _requireLoadedContextLength(),
257+
maxTokens: maxTokens,
258+
temperature: temperature,
259+
systemPrompt: systemPrompt,
260+
);
261+
260262
// Run FFI call in a separate isolate to avoid heap corruption
261263
// from C++ background threads (Metal GPU operations)
262264
final handleAddress = handle.address;
@@ -294,19 +296,20 @@ class DartBridgeLLM {
294296
double temperature = 0.7,
295297
String? systemPrompt,
296298
}) {
299+
final handle = getHandle();
300+
301+
if (!isLoaded) {
302+
throw StateError('No LLM model loaded. Call loadModel() first.');
303+
}
304+
297305
_validateGenerationParameters(
306+
contextLength: _requireLoadedContextLength(),
298307
maxTokens: maxTokens,
299308
temperature: temperature,
300309
systemPrompt: systemPrompt,
301310
streamingEnabled: true,
302311
);
303312

304-
final handle = getHandle();
305-
306-
if (!isLoaded) {
307-
throw StateError('No LLM model loaded. Call loadModel() first.');
308-
}
309-
310313
// Create stream controller for emitting tokens to the caller
311314
final controller = StreamController<String>();
312315

@@ -384,16 +387,26 @@ class DartBridgeLLM {
384387
}
385388
}
386389

390+
int _requireLoadedContextLength() {
391+
final contextLength = _loadedContextLength;
392+
if (contextLength != null && contextLength > 0) {
393+
return contextLength;
394+
}
395+
396+
throw SDKError.validationFailed(
397+
'Loaded model is missing context length metadata for maxTokens validation',
398+
);
399+
}
400+
387401
void _validateGenerationParameters({
402+
required int contextLength,
388403
required int maxTokens,
389404
required double temperature,
390405
String? systemPrompt,
391406
bool streamingEnabled = false,
392407
}) {
393-
final contextLength = _loadedContextLength;
394-
395408
LLMConfiguration(
396-
contextLength: contextLength ?? 32768,
409+
contextLength: contextLength,
397410
maxTokens: maxTokens,
398411
temperature: temperature,
399412
systemPrompt: systemPrompt,

0 commit comments

Comments (0)