whisper4dart_bindings_generated
library
Typedefs
-
FILE
= _IO_FILE
-
-
ggml_abort_callback
= Pointer<NativeFunction<Bool Function(Pointer<Void> data)>>
-
Abort callback.
If not NULL, it is called before ggml computation; if it returns true, the computation is aborted.
-
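A minimal sketch of supplying a ggml_abort_callback from Dart with dart:ffi. The `shouldAbort` flag and the variable names are placeholders, not part of the generated bindings:

```dart
import 'dart:ffi';

// Hypothetical flag toggled elsewhere in the application to request cancellation.
bool shouldAbort = false;

// Must be a top-level (or static) function to be usable with Pointer.fromFunction.
bool _abortCallback(Pointer<Void> data) => shouldAbort;

// The second argument is the exceptional return value, required for non-void callbacks.
final Pointer<NativeFunction<Bool Function(Pointer<Void>)>> abortCb =
    Pointer.fromFunction<Bool Function(Pointer<Void>)>(_abortCallback, false);
```

In whisper.cpp this pointer is typically assigned to the abort_callback field of whisper_full_params (together with abort_callback_user_data). Note that a Pointer.fromFunction callback may only be invoked on the thread running the owning isolate.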
ggml_backend_buffer_t
= Pointer<ggml_backend_buffer>
-
-
ggml_backend_buffer_type_t
= Pointer<ggml_backend_buffer_type>
-
-
ggml_backend_dev_t
= Pointer<ggml_backend_device>
-
-
ggml_backend_eval_callback
= Pointer<NativeFunction<Bool Function(Int node_index, Pointer<ggml_tensor> t1, Pointer<ggml_tensor> t2, Pointer<Void> user_data)>>
-
-
ggml_backend_event_t
= Pointer<ggml_backend_event>
-
-
ggml_backend_graph_plan_t
= Pointer<Void>
-
-
ggml_backend_reg_t
= Pointer<ggml_backend_reg>
-
-
ggml_backend_sched_eval_callback
= Pointer<NativeFunction<Bool Function(Pointer<ggml_tensor> t, Bool ask, Pointer<Void> user_data)>>
-
Evaluation callback for each node in the graph (set with ggml_backend_sched_set_eval_callback).
When ask == true, the scheduler wants to know whether the user wants to observe this node; this allows the scheduler to batch nodes together and evaluate them in a single call.
-
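A sketch of a ggml_backend_sched_eval_callback written in Dart, assuming the generated bindings (which define ggml_tensor) are imported from whisper4dart_bindings_generated.dart (adjust the path to your package layout):

```dart
import 'dart:ffi';
import 'whisper4dart_bindings_generated.dart';

// Scheduler eval callback: first asked whether to observe a node, then handed the node.
bool _onSchedNode(Pointer<ggml_tensor> t, bool ask, Pointer<Void> userData) {
  if (ask) {
    // The scheduler asks whether we want to observe this node.
    return true;
  }
  // ask == false: the node is passed to us for observation; return true to continue.
  return true;
}

final schedEvalCb = Pointer.fromFunction<
    Bool Function(Pointer<ggml_tensor>, Bool, Pointer<Void>)>(_onSchedNode, false);
```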
ggml_backend_sched_t
= Pointer<ggml_backend_sched>
-
The backend scheduler allows multiple backend devices to be used together.
It handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends.
The backends are selected based on which backend supports each operation and on the location of the pre-allocated tensors (for example, the weights).
-
ggml_backend_t
= Pointer<ggml_backend>
-
-
ggml_binary_op_f32_t
= Pointer<NativeFunction<Void Function(Int, Pointer<Float>, Pointer<Float>, Pointer<Float>)>>
-
-
ggml_custom1_op_f32_t
= Pointer<NativeFunction<Void Function(Pointer<ggml_tensor>, Pointer<ggml_tensor>)>>
-
-
ggml_custom1_op_t
= Pointer<NativeFunction<Void Function(Pointer<ggml_tensor> dst, Pointer<ggml_tensor> a, Int ith, Int nth, Pointer<Void> userdata)>>
-
Custom operators (v2)
-
ggml_custom2_op_f32_t
= Pointer<NativeFunction<Void Function(Pointer<ggml_tensor>, Pointer<ggml_tensor>, Pointer<ggml_tensor>)>>
-
-
ggml_custom2_op_t
= Pointer<NativeFunction<Void Function(Pointer<ggml_tensor> dst, Pointer<ggml_tensor> a, Pointer<ggml_tensor> b, Int ith, Int nth, Pointer<Void> userdata)>>
-
-
ggml_custom3_op_f32_t
= Pointer<NativeFunction<Void Function(Pointer<ggml_tensor>, Pointer<ggml_tensor>, Pointer<ggml_tensor>, Pointer<ggml_tensor>)>>
-
-
ggml_custom3_op_t
= Pointer<NativeFunction<Void Function(Pointer<ggml_tensor> dst, Pointer<ggml_tensor> a, Pointer<ggml_tensor> b, Pointer<ggml_tensor> c, Int ith, Int nth, Pointer<Void> userdata)>>
-
-
ggml_fp16_t
= Uint16
-
IEEE 754-2008 half-precision float16.
TODO: make this not an integral type.
-
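Since the typedef is just the raw 16-bit pattern, a small illustrative helper (a hypothetical utility, not part of the generated bindings) can decode a ggml_fp16_t value into a Dart double:

```dart
import 'dart:math' as math;

// Decode an IEEE 754 binary16 bit pattern stored in a 16-bit int into a double.
// Example: fp16BitsToDouble(0x3C00) == 1.0
double fp16BitsToDouble(int bits) {
  final sign = (bits >> 15) & 0x1;
  final exp = (bits >> 10) & 0x1f;
  final frac = bits & 0x3ff;
  double magnitude;
  if (exp == 0) {
    // Subnormal: frac * 2^-10 * 2^-14.
    magnitude = frac * math.pow(2.0, -24).toDouble();
  } else if (exp == 0x1f) {
    // Infinity or NaN.
    magnitude = frac == 0 ? double.infinity : double.nan;
  } else {
    // Normal: (1 + frac/1024) * 2^(exp - 15).
    magnitude = (1 + frac / 1024) * math.pow(2.0, exp - 15).toDouble();
  }
  return sign == 1 ? -magnitude : magnitude;
}
```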
ggml_from_float_t
= Pointer<NativeFunction<Void Function(Pointer<Float> x, Pointer<Void> y, Int64 k)>>
-
-
ggml_guid_t
= Pointer<Pointer<Uint8>>
-
-
ggml_log_callback
= Pointer<NativeFunction<Void Function(Int32 level, Pointer<Char> text, Pointer<Void> user_data)>>
-
TODO: these functions were sandwiched in the old optimization interface; is there a better place for them?
-
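A sketch of a Dart handler for the ggml_log_callback typedef. The string conversion relies on package:ffi, and the log level is printed as-is:

```dart
import 'dart:ffi';
import 'package:ffi/ffi.dart';

// Forwards native log messages to Dart's print.
void _onLog(int level, Pointer<Char> text, Pointer<Void> userData) {
  print('[log $level] ${text.cast<Utf8>().toDartString()}');
}

final logCb = Pointer.fromFunction<
    Void Function(Int32, Pointer<Char>, Pointer<Void>)>(_onLog);
```

Be aware that native code may log from worker threads, where a Pointer.fromFunction callback must not be invoked; for a void callback like this one, NativeCallable.listener is the safer choice in that case.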
ggml_threadpool_t
= Pointer<ggml_threadpool>
-
-
ggml_to_float_t
= Pointer<NativeFunction<Void Function(Pointer<Void> x, Pointer<Float> y, Int64 k)>>
-
-
ggml_unary_op_f32_t
= Pointer<NativeFunction<Void Function(Int, Pointer<Float>, Pointer<Float>)>>
-
Custom operators
-
ggml_vec_dot_t
= Pointer<NativeFunction<Void Function(Int n, Pointer<Float> s, Size bs, Pointer<Void> x, Size bx, Pointer<Void> y, Size by, Int nrc)>>
-
Internal types and functions exposed for tests and benchmarks
-
whisper_encoder_begin_callback
= Pointer<NativeFunction<Bool Function(Pointer<whisper_context> ctx, Pointer<whisper_state> state, Pointer<Void> user_data)>>
-
Encoder begin callback.
If not NULL, it is called before the encoder starts; if it returns false, the computation is aborted.
-
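A sketch of a whisper_encoder_begin_callback in Dart, assuming the generated bindings (whisper_context, whisper_state) are imported from whisper4dart_bindings_generated.dart; `cancelRequested` is hypothetical application state:

```dart
import 'dart:ffi';
import 'whisper4dart_bindings_generated.dart';

// Hypothetical flag set by the application when the user cancels transcription.
bool cancelRequested = false;

bool _onEncoderBegin(Pointer<whisper_context> ctx,
    Pointer<whisper_state> state, Pointer<Void> userData) {
  // Returning false aborts the computation before the encoder runs.
  return !cancelRequested;
}

final encoderBeginCb = Pointer.fromFunction<
    Bool Function(Pointer<whisper_context>, Pointer<whisper_state>,
        Pointer<Void>)>(_onEncoderBegin, false);
```

This pointer would typically be assigned to the encoder_begin_callback field of whisper_full_params before calling whisper_full.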
whisper_logits_filter_callback
= Pointer<NativeFunction<Void Function(Pointer<whisper_context> ctx, Pointer<whisper_state> state, Pointer<whisper_token_data> tokens, Int n_tokens, Pointer<Float> logits, Pointer<Void> user_data)>>
-
Logits filter callback.
Can be used to modify the logits before sampling.
If not NULL, it is called after applying temperature to the logits.
-
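A sketch of a whisper_logits_filter_callback in Dart. `suppressedToken` is a hypothetical token id (it must be smaller than the model's vocabulary size), and the bindings import path is an assumption:

```dart
import 'dart:ffi';
import 'whisper4dart_bindings_generated.dart';

// Hypothetical token id to suppress; replace with a real whisper_token value.
const int suppressedToken = 123;

void _filterLogits(
    Pointer<whisper_context> ctx,
    Pointer<whisper_state> state,
    Pointer<whisper_token_data> tokens,
    int nTokens,
    Pointer<Float> logits,
    Pointer<Void> userData) {
  // The logits buffer holds one value per vocabulary token; setting an entry
  // to -infinity prevents that token from being sampled.
  logits[suppressedToken] = double.negativeInfinity;
}

final logitsFilterCb = Pointer.fromFunction<
    Void Function(Pointer<whisper_context>, Pointer<whisper_state>,
        Pointer<whisper_token_data>, Int, Pointer<Float>,
        Pointer<Void>)>(_filterLogits);
```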
whisper_new_segment_callback
= Pointer<NativeFunction<Void Function(Pointer<whisper_context> ctx, Pointer<whisper_state> state, Int n_new, Pointer<Void> user_data)>>
-
Text segment callback.
Called on every newly generated text segment; use the whisper_full_...() functions to obtain the text segments.
-
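A sketch of a whisper_new_segment_callback in Dart. It only reports how many segments were added; the text itself would be read back through the whisper_full_...() functions on the bindings object (not shown). The import path is an assumption:

```dart
import 'dart:ffi';
import 'whisper4dart_bindings_generated.dart';

void _onNewSegment(Pointer<whisper_context> ctx,
    Pointer<whisper_state> state, int nNew, Pointer<Void> userData) {
  print('new segments generated: $nNew');
}

final newSegmentCb = Pointer.fromFunction<
    Void Function(Pointer<whisper_context>, Pointer<whisper_state>, Int,
        Pointer<Void>)>(_onNewSegment);
```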
whisper_progress_callback
= Pointer<NativeFunction<Void Function(Pointer<whisper_context> ctx, Pointer<whisper_state> state, Int progress, Pointer<Void> user_data)>>
-
Progress callback
-
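A sketch of a whisper_progress_callback in Dart (whisper.cpp reports progress as an integer, typically a percentage); the import path is an assumption:

```dart
import 'dart:ffi';
import 'whisper4dart_bindings_generated.dart';

void _onProgress(Pointer<whisper_context> ctx, Pointer<whisper_state> state,
    int progress, Pointer<Void> userData) {
  print('transcription progress: $progress%');
}

final progressCb = Pointer.fromFunction<
    Void Function(Pointer<whisper_context>, Pointer<whisper_state>, Int,
        Pointer<Void>)>(_onProgress);
```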
whisper_token
= Int32
-