1
0

feat: Implement a modular skill system with hotword detection, streaming text output, and enhanced logging.

This commit is contained in:
2026-02-27 00:39:32 +01:00
parent 4ee4bc5457
commit a365448399
18 changed files with 451 additions and 23 deletions

View File

@@ -4,6 +4,7 @@ using System.Text.Json.Serialization;
using Toak.Api.Models;
using Toak.Serialization;
using Toak.Core;
namespace Toak.Api;
@@ -39,7 +40,9 @@ public class GroqApiClient
content.Add(new StringContent(firstLang), "language");
}
Logger.LogDebug($"Sending Whisper API request ({modelToUse})...");
var response = await _httpClient.PostAsync("audio/transcriptions", content);
Logger.LogDebug($"Whisper API response status: {response.StatusCode}");
if (!response.IsSuccessStatusCode)
{
@@ -67,7 +70,9 @@ public class GroqApiClient
var jsonContent = new StringContent(JsonSerializer.Serialize(requestBody, AppJsonSerializerContext.Default.LlamaRequest), System.Text.Encoding.UTF8, "application/json");
Logger.LogDebug($"Sending Llama API request (model: {requestBody.Model})...");
var response = await _httpClient.PostAsync("chat/completions", jsonContent);
Logger.LogDebug($"Llama API response status: {response.StatusCode}");
if (!response.IsSuccessStatusCode)
{
@@ -80,4 +85,55 @@ public class GroqApiClient
return result?.Choices?.FirstOrDefault()?.Message?.Content ?? string.Empty;
}
/// <summary>
/// Streams refined text from the Llama chat-completions endpoint via
/// server-sent events (SSE), yielding each content delta as it arrives.
/// </summary>
/// <param name="rawTranscript">Raw transcript text to refine; wrapped in &lt;transcript&gt; tags for the model.</param>
/// <param name="systemPrompt">System prompt instructing the model how to refine the transcript.</param>
/// <param name="model">Model identifier; falls back to "openai/gpt-oss-20b" when null or whitespace.</param>
/// <returns>An asynchronous sequence of text fragments in arrival order.</returns>
/// <exception cref="HttpRequestException">Thrown when the API responds with a non-success status code.</exception>
public async IAsyncEnumerable<string> RefineTextStreamAsync(string rawTranscript, string systemPrompt, string model = "openai/gpt-oss-20b")
{
    var requestBody = new LlamaRequest
    {
        Model = string.IsNullOrWhiteSpace(model) ? "openai/gpt-oss-20b" : model,
        // Temperature 0 keeps refinement deterministic.
        Temperature = 0.0,
        Stream = true,
        Messages = new[]
        {
            new LlamaRequestMessage { Role = "system", Content = systemPrompt },
            new LlamaRequestMessage { Role = "user", Content = $"<transcript>{rawTranscript}</transcript>" }
        }
    };

    var jsonContent = new StringContent(JsonSerializer.Serialize(requestBody, AppJsonSerializerContext.Default.LlamaRequest), System.Text.Encoding.UTF8, "application/json");
    using var request = new HttpRequestMessage(HttpMethod.Post, "chat/completions") { Content = jsonContent };
    request.Headers.Accept.Add(new MediaTypeWithQualityHeaderValue("text/event-stream"));

    // Fixed typo in log message ("Steam" -> "Stream") for consistency with the status log below.
    Logger.LogDebug($"Sending Llama Stream API request (model: {requestBody.Model})...");
    // ResponseHeadersRead lets us start consuming the SSE body before the full response completes.
    using var response = await _httpClient.SendAsync(request, HttpCompletionOption.ResponseHeadersRead);
    Logger.LogDebug($"Llama Stream API response status: {response.StatusCode}");
    if (!response.IsSuccessStatusCode)
    {
        var error = await response.Content.ReadAsStringAsync();
        // HttpRequestException is more specific than Exception and is still caught
        // by any caller that catches Exception, so this stays backward compatible.
        throw new HttpRequestException($"Llama API Error: {response.StatusCode} - {error}");
    }

    using var stream = await response.Content.ReadAsStreamAsync();
    using var reader = new StreamReader(stream);
    string? line;
    while ((line = await reader.ReadLineAsync()) != null)
    {
        if (string.IsNullOrWhiteSpace(line)) continue;
        // SSE payload lines are prefixed with "data: "; everything else (e.g. comments) is ignored.
        if (!line.StartsWith("data: ", StringComparison.Ordinal)) continue;

        var data = line.Substring("data: ".Length).Trim();
        if (data == "[DONE]") break; // SSE end-of-stream sentinel.

        var chunk = JsonSerializer.Deserialize(data, AppJsonSerializerContext.Default.LlamaStreamResponse);
        var content = chunk?.Choices?.FirstOrDefault()?.Delta?.Content;
        if (!string.IsNullOrEmpty(content))
        {
            yield return content;
        }
    }
}
}