createSession method

@override
Future<InferenceModelSession> createSession({
  double temperature = 0.8,
  int randomSeed = 1,
  int topK = 1,
  double? topP,
  String? loraPath,
})
override

Creates a new InferenceModelSession for generation.

The temperature, randomSeed, topK, and topP parameters control token sampling during generation. loraPath is an optional path to a LoRA weights file; when it is omitted, the path registered with the model manager is used, if any.
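A minimal usage sketch follows. The model variable and the close call are illustrative assumptions; only createSession and its parameters come from this page.

// Sketch only: assumes `model` is an already-created InferenceModel instance.
final session = await model.createSession(
  temperature: 0.6,
  topK: 40,
  randomSeed: 42,
);
// ... run generation with the session, then release its resources.
await session.close();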

Implementation

@override
Future<InferenceModelSession> createSession({
  double temperature = 0.8,
  int randomSeed = 1,
  int topK = 1,
  double? topP,
  String? loraPath,
}) async {
  // Reuse an in-flight or already-completed initialization if one exists,
  // so repeated calls share a single underlying session.
  if (_initCompleter case Completer<InferenceModelSession> completer) {
    return completer.future;
  }
  final completer = _initCompleter = Completer<InferenceModelSession>();
  try {
    // Load the MediaPipe GenAI WASM assets that back LlmInference on the web.
    final fileset = await FilesetResolver.forGenAiTasks(
            'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-genai/wasm'.toJS)
        .toDart;

    // Fall back to the LoRA path registered with the model manager; LoRA is
    // only wired into the options when both a path and the supported ranks
    // are available.
    final loraPathToUse = loraPath ?? modelManager._loraPath;
    final hasLoraParams = loraPathToUse != null && loraRanks != null;

    // Translate the Dart-side parameters into MediaPipe inference options.
    final config = LlmInferenceOptions(
      baseOptions:
          LlmInferenceBaseOptions(modelAssetPath: modelManager._path),
      maxTokens: maxTokens,
      randomSeed: randomSeed,
      topK: topK,
      temperature: temperature,
      topP: topP,
      supportedLoraRanks:
          !hasLoraParams ? null : Int32List.fromList(loraRanks!).toJS,
      loraPath: !hasLoraParams ? null : loraPathToUse,
    );

    final llmInference =
        await LlmInference.createFromOptions(fileset, config).toDart;

    // Cache the session on the model so later calls and cleanup can reach it.
    final session = this.session = WebModelSession(
      modelType: modelType,
      llmInference: llmInference,
      onClose: onClose,
    );
    completer.complete(session);
    return session;
  } catch (e, st) {
    // Clear the cached completer so a later call can retry, and surface the
    // failure both to this caller and to anyone awaiting the cached future.
    _initCompleter = null;
    completer.completeError(Exception('Failed to create session: $e'), st);
    return completer.future;
  }
}
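
When LoRA weights are in play, loraPath can be passed explicitly; supportedLoraRanks is derived from the model's loraRanks as shown above. A hedged sketch, where the adapter location is a placeholder value, not a real asset:

// Sketch only: the adapter path below is a hypothetical example.
final loraSession = await model.createSession(
  temperature: 0.8,
  loraPath: 'https://example.com/adapters/my_adapter.bin',
);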