createModel method

```dart
@override
Future<InferenceModel> createModel({
  required ModelType modelType,
  ModelFileType fileType = ModelFileType.task,
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  List<int>? loraRanks,
  int? maxNumImages,
  bool supportImage = false,
  bool supportAudio = false,
})
```
override

Creates and returns a new InferenceModel instance.

- `modelType` — the model type to create.
- `fileType` — the model file type (defaults to `ModelFileType.task`).
- `maxTokens` — maximum context length for the model.
- `preferredBackend` — backend preference (e.g., CPU, GPU).
- `loraRanks` — optional supported LoRA ranks.
- `maxNumImages` — maximum number of images (for multimodal models).
- `supportImage` — whether the model supports images.
- `supportAudio` — whether the model supports audio (Gemma 3n E4B only).

Implementation

@override
Future<InferenceModel> createModel({
  required ModelType modelType,
  ModelFileType fileType = ModelFileType.task,
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  List<int>? loraRanks,
  int? maxNumImages,
  bool supportImage = false,
  bool supportAudio = false,
}) async {
  // An active model must have been selected before creation.
  final activeModel = _modelManager.activeInferenceModel;
  if (activeModel == null) {
    throw StateError(
      'No active inference model set. Use `FlutterGemma.installModel()` or `modelManager.setActiveModel()` first',
    );
  }

  // Cast once, up front. Previously this cast was repeated at the end of the
  // method (after `_initializedModel` had already been assigned), so a cast
  // failure there could leave stale singleton state behind.
  final requestedSpec = activeModel as InferenceModelSpec;

  // NOTE(review): `loraRanks` is accepted for interface compatibility but is
  // not consumed by this desktop implementation — confirm intended.

  // Reuse the existing singleton when the active model and the runtime
  // parameters are unchanged; otherwise dispose it and fall through to
  // recreate.
  if (_initCompleter != null &&
      _initializedModel != null &&
      _lastActiveInferenceSpec != null) {
    final currentSpec = _lastActiveInferenceSpec!;
    final currentModel = _initializedModel as DesktopInferenceModel?;

    final modelChanged = currentSpec.name != requestedSpec.name;
    final paramsChanged = currentModel != null &&
        (currentModel.supportImage != supportImage ||
            currentModel.supportAudio != supportAudio ||
            currentModel.maxTokens != maxTokens);

    if (modelChanged || paramsChanged) {
      debugPrint(
          'Model recreation: modelChanged=$modelChanged, paramsChanged=$paramsChanged');
      await _initializedModel?.close();
      _initCompleter = null;
      _initializedModel = null;
      _lastActiveInferenceSpec = null;
    } else {
      debugPrint('Reusing existing model instance for ${requestedSpec.name}');
      return _initCompleter!.future;
    }
  }

  // Another caller is already initializing — share its in-flight future.
  if (_initCompleter case Completer<InferenceModel> completer) {
    return completer.future;
  }

  final completer = _initCompleter = Completer<InferenceModel>();

  try {
    // Verify the active model is still installed on disk.
    final isInstalled = await _modelManager.isModelInstalled(activeModel);
    if (!isInstalled) {
      throw Exception('Active model is no longer installed');
    }

    // Resolve the model file path(s).
    final modelFilePaths = await _modelManager.getModelFilePaths(activeModel);
    if (modelFilePaths == null || modelFilePaths.isEmpty) {
      throw Exception('Model file paths not found');
    }

    final modelPath = modelFilePaths.values.first;
    debugPrint('[FlutterGemmaDesktop] Using model: $modelPath');

    // Cache dir speeds up subsequent model loads.
    final cacheDir = (await getApplicationSupportDirectory()).path;

    // Initialize via dart:ffi → C API (no JRE, no gRPC).
    final ffiClient = LiteRtLmFfiClient();
    final backend = switch (preferredBackend) {
      PreferredBackend.cpu => 'cpu',
      PreferredBackend.gpu || null => 'gpu',
      PreferredBackend.npu => throw UnsupportedError(
          'PreferredBackend.npu is only supported on Android with .litertlm '
          'models; not available on desktop.',
        ),
    };
    await ffiClient.initialize(
      modelPath: modelPath,
      backend: backend,
      maxTokens: maxTokens,
      cacheDir: cacheDir,
      enableVision: supportImage,
      // Vision disabled → zero image slots regardless of `maxNumImages`.
      maxNumImages: supportImage ? (maxNumImages ?? 1) : 0,
      enableAudio: supportAudio,
    );

    // Create the singleton model instance; `onClose` clears all cached
    // state so the next `createModel` call rebuilds from scratch.
    final model = _initializedModel = DesktopInferenceModel(
      ffiClient: ffiClient,
      maxTokens: maxTokens,
      modelType: modelType,
      fileType: fileType,
      supportImage: supportImage,
      supportAudio: supportAudio,
      onClose: () {
        _initializedModel = null;
        _initCompleter = null;
        _lastActiveInferenceSpec = null;
      },
    );

    _lastActiveInferenceSpec = requestedSpec;

    completer.complete(model);
    return model;
  } catch (e, st) {
    completer.completeError(e, st);
    // Reset ALL singleton state on failure; the previous version reset only
    // `_initCompleter`, which could leave `_initializedModel` and
    // `_lastActiveInferenceSpec` stale after a partial failure.
    _initCompleter = null;
    _initializedModel = null;
    _lastActiveInferenceSpec = null;
    rethrow;
  }
}