Add Google Gemini API integration

- Add provider selection dropdown to ApiKeyHeader
- Create googleGeminiClient.js for Gemini API interactions
- Create aiProviderService.js abstraction layer for multiple AI providers
- Update windowManager to store and manage provider selection
- Update liveSummaryService and renderer to use provider abstraction
- Add @google/generative-ai package dependency
- Update sqliteClient to store provider preference in database
- Support streaming responses for both OpenAI and Gemini models
commit 5e14a32045 (parent ba8401345b)
@@ -29,6 +29,7 @@
   },
   "license": "GPL-3.0",
   "dependencies": {
+    "@google/generative-ai": "^0.24.1",
     "axios": "^1.10.0",
     "better-sqlite3": "^9.4.3",
     "cors": "^2.8.5",
@@ -5,6 +5,7 @@ export class ApiKeyHeader extends LitElement {
         apiKey: { type: String },
         isLoading: { type: Boolean },
         errorMessage: { type: String },
+        selectedProvider: { type: String },
     };

     static styles = css`
@@ -45,11 +46,11 @@ export class ApiKeyHeader extends LitElement {

         .container {
             width: 285px;
-            height: 220px;
+            min-height: 260px;
             padding: 18px 20px;
             background: rgba(0, 0, 0, 0.3);
             border-radius: 16px;
-            overflow: hidden;
+            overflow: visible;
             position: relative;
             display: flex;
             flex-direction: column;
@@ -152,6 +153,46 @@ export class ApiKeyHeader extends LitElement {
             outline: none;
         }

+        .provider-select {
+            width: 100%;
+            height: 34px;
+            background: rgba(255, 255, 255, 0.1);
+            border-radius: 10px;
+            border: 1px solid rgba(255, 255, 255, 0.2);
+            padding: 0 10px;
+            color: white;
+            font-size: 12px;
+            font-weight: 400;
+            margin-bottom: 6px;
+            text-align: center;
+            cursor: pointer;
+            -webkit-appearance: none;
+            -moz-appearance: none;
+            appearance: none;
+            background-image: url('data:image/svg+xml;charset=US-ASCII,%3Csvg%20width%3D%2714%27%20height%3D%278%27%20viewBox%3D%270%200%2014%208%27%20xmlns%3D%27http%3A//www.w3.org/2000/svg%27%3E%3Cpath%20d%3D%27M1%201l6%206%206-6%27%20stroke%3D%27%23ffffff%27%20stroke-width%3D%271.5%27%20fill%3D%27none%27%20fill-rule%3D%27evenodd%27/%3E%3C/svg%3E');
+            background-repeat: no-repeat;
+            background-position: right 10px center;
+            background-size: 12px;
+            padding-right: 30px;
+        }
+
+        .provider-select:hover {
+            background-color: rgba(255, 255, 255, 0.15);
+            border-color: rgba(255, 255, 255, 0.3);
+        }
+
+        .provider-select:focus {
+            outline: none;
+            background-color: rgba(255, 255, 255, 0.15);
+            border-color: rgba(255, 255, 255, 0.4);
+        }
+
+        .provider-select option {
+            background: #1a1a1a;
+            color: white;
+            padding: 5px;
+        }
+
         .action-button {
             width: 100%;
             height: 34px;
@@ -198,6 +239,15 @@ export class ApiKeyHeader extends LitElement {
             font-weight: 500; /* Medium */
             margin: 10px 0;
         }

+        .provider-label {
+            color: rgba(255, 255, 255, 0.7);
+            font-size: 11px;
+            font-weight: 400;
+            margin-bottom: 4px;
+            width: 100%;
+            text-align: left;
+        }
+
     `;

     constructor() {
@@ -208,6 +258,7 @@ export class ApiKeyHeader extends LitElement {
         this.isLoading = false;
         this.errorMessage = '';
         this.validatedApiKey = null;
+        this.selectedProvider = 'openai';

         this.handleMouseMove = this.handleMouseMove.bind(this);
         this.handleMouseUp = this.handleMouseUp.bind(this);
@@ -216,6 +267,7 @@ export class ApiKeyHeader extends LitElement {
         this.handleInput = this.handleInput.bind(this);
         this.handleAnimationEnd = this.handleAnimationEnd.bind(this);
         this.handleUsePicklesKey = this.handleUsePicklesKey.bind(this);
+        this.handleProviderChange = this.handleProviderChange.bind(this);
     }

     reset() {
@@ -223,11 +275,12 @@ export class ApiKeyHeader extends LitElement {
         this.isLoading = false;
         this.errorMessage = '';
         this.validatedApiKey = null;
+        this.selectedProvider = 'openai';
         this.requestUpdate();
     }

     async handleMouseDown(e) {
-        if (e.target.tagName === 'INPUT' || e.target.tagName === 'BUTTON') {
+        if (e.target.tagName === 'INPUT' || e.target.tagName === 'BUTTON' || e.target.tagName === 'SELECT') {
             return;
         }

@@ -295,6 +348,13 @@ export class ApiKeyHeader extends LitElement {
         });
     }

+    handleProviderChange(e) {
+        this.selectedProvider = e.target.value;
+        this.errorMessage = '';
+        console.log('Provider changed to:', this.selectedProvider);
+        this.requestUpdate();
+    }
+
     handlePaste(e) {
         e.preventDefault();
         this.errorMessage = '';
@@ -343,21 +403,13 @@ export class ApiKeyHeader extends LitElement {
         const apiKey = this.apiKey.trim();
         let isValid = false;
         try {
-            const isValid = await this.validateApiKey(this.apiKey.trim());
+            const isValid = await this.validateApiKey(this.apiKey.trim(), this.selectedProvider);

             if (isValid) {
-                console.log('API key valid - checking system permissions...');
-                const permissionResult = await this.checkAndRequestPermissions();
-                if (permissionResult.success) {
-                    console.log('All permissions granted - starting slide out animation');
-                    this.startSlideOutAnimation();
-                    this.validatedApiKey = this.apiKey.trim();
-                } else {
-                    this.errorMessage = permissionResult.error || 'Permission setup required';
-                    console.log('Permission setup incomplete:', permissionResult);
-                }
+                console.log('API key valid - starting slide out animation');
+                this.startSlideOutAnimation();
+                this.validatedApiKey = this.apiKey.trim();
+                this.validatedProvider = this.selectedProvider;
             } else {
                 this.errorMessage = 'Invalid API key - please check and try again';
                 console.log('API key validation failed');
@@ -371,92 +423,69 @@ export class ApiKeyHeader extends LitElement {
             }
         }
     }

-    async validateApiKey(apiKey) {
+    async validateApiKey(apiKey, provider = 'openai') {
         if (!apiKey || apiKey.length < 15) return false;
-        if (!apiKey.match(/^[A-Za-z0-9_-]+$/)) return false;

-        try {
-            console.log('Validating API key with openai models endpoint...');
+        if (provider === 'openai') {
+            if (!apiKey.match(/^[A-Za-z0-9_-]+$/)) return false;

-            const response = await fetch('https://api.openai.com/v1/models', {
-                headers: {
-                    'Content-Type': 'application/json',
-                    Authorization: `Bearer ${apiKey}`,
-                },
-            });
+            try {
+                console.log('Validating OpenAI API key...');

-            if (response.ok) {
-                const data = await response.json();
+                const response = await fetch('https://api.openai.com/v1/models', {
+                    headers: {
+                        'Content-Type': 'application/json',
+                        Authorization: `Bearer ${apiKey}`,
+                    },
+                });

-                const hasGPTModels = data.data && data.data.some(m => m.id.startsWith('gpt-'));
-                if (hasGPTModels) {
-                    console.log('API key validation successful - GPT models available');
-                    return true;
-                } else {
-                    console.log('API key valid but no GPT models available');
-                    return false;
+                if (response.ok) {
+                    const data = await response.json();
+                    const hasGPTModels = data.data && data.data.some(m => m.id.startsWith('gpt-'));
+                    if (hasGPTModels) {
+                        console.log('OpenAI API key validation successful');
+                        return true;
+                    } else {
+                        console.log('API key valid but no GPT models available');
+                        return false;
+                    }
+                } else {
+                    const errorData = await response.json().catch(() => ({}));
+                    console.log('API key validation failed:', response.status, errorData.error?.message || 'Unknown error');
+                    return false;
                 }
-            } else {
-                const errorData = await response.json().catch(() => ({}));
-                console.log('API key validation failed:', response.status, errorData.error?.message || 'Unknown error');
-                return false;
+            } catch (error) {
+                console.error('API key validation network error:', error);
+                return apiKey.length >= 20; // Fallback for network issues
             }
-        } catch (error) {
-            console.error('API key validation network error:', error);
-            return apiKey.length >= 20; // Fallback for network issues
-        }
-    }
+        } else if (provider === 'gemini') {
+            // Gemini API keys typically start with 'AIza'
+            if (!apiKey.match(/^[A-Za-z0-9_-]+$/)) return false;

-    async checkAndRequestPermissions() {
-        if (!window.require) {
-            return { success: true };
-        }
+            try {
+                console.log('Validating Gemini API key...');

-        const { ipcRenderer } = window.require('electron');
+                // Test the API key with a simple models list request
+                const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`);

-        try {
-            const permissions = await ipcRenderer.invoke('check-system-permissions');
-            console.log('[Permissions] Current status:', permissions);
+                if (response.ok) {
+                    const data = await response.json();
+                    if (data.models && data.models.length > 0) {
+                        console.log('Gemini API key validation successful');
+                        return true;
+                    }
+                }

-            if (!permissions.needsSetup) {
-                return { success: true };
-            }
-
-            if (!permissions.microphone) {
-                console.log('[Permissions] Requesting microphone permission...');
-                const micResult = await ipcRenderer.invoke('request-microphone-permission');
-
-                if (!micResult.success) {
-                    console.log('[Permissions] Microphone permission denied');
-                    await ipcRenderer.invoke('open-system-preferences', 'microphone');
-                    return {
-                        success: false,
-                        error: 'Please grant microphone access in System Preferences'
-                    };
-                }
-            }
-
-            if (!permissions.screen) {
-                console.log('[Permissions] Screen recording permission needed');
-                await ipcRenderer.invoke('open-system-preferences', 'screen-recording');
-
-                this.errorMessage = 'Please grant screen recording permission and try again';
-                this.requestUpdate();
-
-                return {
-                    success: false,
-                    error: 'Please grant screen recording access in System Preferences'
-                };
-            }
-
-            return { success: true };
-        } catch (error) {
-            console.error('[Permissions] Error checking/requesting permissions:', error);
-            return {
-                success: false,
-                error: 'Failed to check permissions'
-            };
-        }
-    }
+                console.log('Gemini API key validation failed');
+                return false;
+            } catch (error) {
+                console.error('Gemini API key validation network error:', error);
+                return apiKey.length >= 20; // Fallback
+            }
+        }
+
+        return false;
+    }

     startSlideOutAnimation() {
@@ -489,9 +518,13 @@ export class ApiKeyHeader extends LitElement {

         if (this.validatedApiKey) {
             if (window.require) {
-                window.require('electron').ipcRenderer.invoke('api-key-validated', this.validatedApiKey);
+                window.require('electron').ipcRenderer.invoke('api-key-validated', {
+                    apiKey: this.validatedApiKey,
+                    provider: this.validatedProvider || 'openai'
+                });
             }
             this.validatedApiKey = null;
+            this.validatedProvider = null;
         }
     }
 }
@@ -510,6 +543,7 @@ export class ApiKeyHeader extends LitElement {

     render() {
         const isButtonDisabled = this.isLoading || !this.apiKey || !this.apiKey.trim();
+        console.log('Rendering with provider:', this.selectedProvider);

         return html`
             <div class="container" @mousedown=${this.handleMouseDown}>
@@ -522,10 +556,21 @@ export class ApiKeyHeader extends LitElement {

                 <div class="form-content">
                     <div class="error-message">${this.errorMessage}</div>
+                    <div class="provider-label">Select AI Provider:</div>
+                    <select
+                        class="provider-select"
+                        .value=${this.selectedProvider || 'openai'}
+                        @change=${this.handleProviderChange}
+                        ?disabled=${this.isLoading}
+                        tabindex="0"
+                    >
+                        <option value="openai" ?selected=${this.selectedProvider === 'openai'}>OpenAI</option>
+                        <option value="gemini" ?selected=${this.selectedProvider === 'gemini'}>Google Gemini</option>
+                    </select>
                     <input
                         type="password"
                         class="api-input"
-                        placeholder="Enter your OpenAI API key"
+                        placeholder=${this.selectedProvider === 'openai' ? "Enter your OpenAI API key" : "Enter your Gemini API key"}
                         .value=${this.apiKey || ''}
                         @input=${this.handleInput}
                         @keypress=${this.handleKeyPress}
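Note: the renderer now sends an object ({ apiKey, provider }) over the 'api-key-validated' channel instead of a bare string. The receiving side lives in windowManager, whose diff is suppressed further down this page, so the following is only a sketch of what that handler plausibly looks like. The channel name and the sqliteClient.saveApiKey signature come from this commit; the module-state names and the string fallback are assumptions.

    // Hypothetical main-process handler; windowManager.js is not shown on this page.
    const { ipcMain } = require('electron');
    const sqliteClient = require('../common/services/sqliteClient'); // assumed relative path

    let storedApiKey = null;        // assumed module state
    let storedProvider = 'openai';  // assumed module state

    ipcMain.handle('api-key-validated', async (_event, payload) => {
        // Accept both the old bare-string payload and the new object shape.
        const apiKey = typeof payload === 'string' ? payload : payload.apiKey;
        const provider = typeof payload === 'string' ? 'openai' : payload.provider || 'openai';

        storedApiKey = apiKey;
        storedProvider = provider;
        await sqliteClient.saveApiKey(apiKey, undefined, provider); // uid keeps its default
        return { success: true };
    });

    function getStoredProvider() {
        return storedProvider; // exported for main-process callers like liveSummaryService
    }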
src/common/services/aiProviderService.js (new file)
@@ -0,0 +1,377 @@
const { createOpenAiGenerativeClient, getOpenAiGenerativeModel } = require('./openAiClient.js');
const { createGeminiClient, getGeminiGenerativeModel, createGeminiChat } = require('./googleGeminiClient.js');

/**
 * Creates an AI client based on the provider
 * @param {string} apiKey - The API key
 * @param {string} provider - The provider ('openai' or 'gemini')
 * @returns {object} The AI client
 */
function createAIClient(apiKey, provider = 'openai') {
    switch (provider) {
        case 'openai':
            return createOpenAiGenerativeClient(apiKey);
        case 'gemini':
            return createGeminiClient(apiKey);
        default:
            throw new Error(`Unsupported AI provider: ${provider}`);
    }
}

/**
 * Gets a generative model based on the provider
 * @param {object} client - The AI client
 * @param {string} provider - The provider ('openai' or 'gemini')
 * @param {string} model - The model name (optional)
 * @returns {object} The model object
 */
function getGenerativeModel(client, provider = 'openai', model) {
    switch (provider) {
        case 'openai':
            return getOpenAiGenerativeModel(client, model || 'gpt-4.1');
        case 'gemini':
            return getGeminiGenerativeModel(client, model || 'gemini-2.5-flash');
        default:
            throw new Error(`Unsupported AI provider: ${provider}`);
    }
}

/**
 * Makes a chat completion request based on the provider
 * @param {object} params - Request parameters
 * @returns {Promise<object>} The completion response
 */
async function makeChatCompletion({ apiKey, provider = 'openai', messages, temperature = 0.7, maxTokens = 1024, model, stream = false }) {
    if (provider === 'openai') {
        const fetchUrl = 'https://api.openai.com/v1/chat/completions';
        const response = await fetch(fetchUrl, {
            method: 'POST',
            headers: {
                Authorization: `Bearer ${apiKey}`,
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({
                model: model || 'gpt-4.1',
                messages,
                temperature,
                max_tokens: maxTokens,
                stream,
            }),
        });

        if (!response.ok) {
            throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
        }

        if (stream) {
            return response;
        }

        const result = await response.json();
        return {
            content: result.choices[0].message.content.trim(),
            raw: result
        };
    } else if (provider === 'gemini') {
        const client = createGeminiClient(apiKey);
        const genModel = getGeminiGenerativeModel(client, model || 'gemini-2.5-flash');

        // Convert OpenAI format messages to Gemini format
        const parts = [];
        for (const message of messages) {
            if (message.role === 'system') {
                parts.push(message.content);
            } else if (message.role === 'user') {
                if (typeof message.content === 'string') {
                    parts.push(message.content);
                } else if (Array.isArray(message.content)) {
                    // Handle multimodal content
                    for (const part of message.content) {
                        if (part.type === 'text') {
                            parts.push(part.text);
                        } else if (part.type === 'image_url' && part.image_url?.url) {
                            // Extract base64 data from data URL
                            const base64Match = part.image_url.url.match(/^data:(.+);base64,(.+)$/);
                            if (base64Match) {
                                parts.push({
                                    inlineData: {
                                        mimeType: base64Match[1],
                                        data: base64Match[2]
                                    }
                                });
                            }
                        }
                    }
                }
            }
        }

        const result = await genModel.generateContent(parts);
        return {
            content: result.response.text(),
            raw: result
        };
    } else {
        throw new Error(`Unsupported AI provider: ${provider}`);
    }
}

/**
 * Makes a chat completion request with Portkey support
 * @param {object} params - Request parameters including Portkey options
 * @returns {Promise<object>} The completion response
 */
async function makeChatCompletionWithPortkey({
    apiKey,
    provider = 'openai',
    messages,
    temperature = 0.7,
    maxTokens = 1024,
    model,
    usePortkey = false,
    portkeyVirtualKey = null
}) {
    if (!usePortkey) {
        return makeChatCompletion({ apiKey, provider, messages, temperature, maxTokens, model });
    }

    // Portkey is only supported for OpenAI currently
    if (provider !== 'openai') {
        console.warn('Portkey is only supported for OpenAI provider, falling back to direct API');
        return makeChatCompletion({ apiKey, provider, messages, temperature, maxTokens, model });
    }

    const fetchUrl = 'https://api.portkey.ai/v1/chat/completions';
    const response = await fetch(fetchUrl, {
        method: 'POST',
        headers: {
            'x-portkey-api-key': 'gRv2UGRMq6GGLJ8aVEB4e7adIewu',
            'x-portkey-virtual-key': portkeyVirtualKey || apiKey,
            'Content-Type': 'application/json',
        },
        body: JSON.stringify({
            model: model || 'gpt-4.1',
            messages,
            temperature,
            max_tokens: maxTokens,
        }),
    });

    if (!response.ok) {
        throw new Error(`Portkey API error: ${response.status} ${response.statusText}`);
    }

    const result = await response.json();
    return {
        content: result.choices[0].message.content.trim(),
        raw: result
    };
}

/**
 * Makes a streaming chat completion request
 * @param {object} params - Request parameters
 * @returns {Promise<Response>} The streaming response
 */
async function makeStreamingChatCompletion({ apiKey, provider = 'openai', messages, temperature = 0.7, maxTokens = 1024, model }) {
    if (provider === 'openai') {
        const fetchUrl = 'https://api.openai.com/v1/chat/completions';
        const response = await fetch(fetchUrl, {
            method: 'POST',
            headers: {
                Authorization: `Bearer ${apiKey}`,
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({
                model: model || 'gpt-4.1',
                messages,
                temperature,
                max_tokens: maxTokens,
                stream: true,
            }),
        });

        if (!response.ok) {
            throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
        }

        return response;
    } else if (provider === 'gemini') {
        console.log('[AIProviderService] Starting Gemini streaming request');
        // Gemini streaming requires a different approach
        // We'll create a ReadableStream that mimics OpenAI's SSE format
        const geminiClient = createGeminiClient(apiKey);

        // Extract system instruction if present
        let systemInstruction = '';
        const nonSystemMessages = [];

        for (const msg of messages) {
            if (msg.role === 'system') {
                systemInstruction = msg.content;
            } else {
                nonSystemMessages.push(msg);
            }
        }

        const chat = createGeminiChat(geminiClient, model || 'gemini-2.0-flash-exp', {
            temperature,
            maxOutputTokens: maxTokens || 8192,
            systemInstruction: systemInstruction || undefined
        });

        // Create a ReadableStream to handle Gemini's streaming
        const stream = new ReadableStream({
            async start(controller) {
                try {
                    console.log('[AIProviderService] Processing messages for Gemini:', nonSystemMessages.length, 'messages (excluding system)');

                    // Get the last user message
                    const lastMessage = nonSystemMessages[nonSystemMessages.length - 1];
                    let lastUserMessage = lastMessage.content;

                    // Handle case where content might be an array (multimodal)
                    if (Array.isArray(lastUserMessage)) {
                        // Extract text content from array
                        const textParts = lastUserMessage.filter(part =>
                            typeof part === 'string' || (part && part.type === 'text')
                        );
                        lastUserMessage = textParts.map(part =>
                            typeof part === 'string' ? part : part.text
                        ).join(' ');
                    }

                    console.log('[AIProviderService] Sending message to Gemini:',
                        typeof lastUserMessage === 'string' ? lastUserMessage.substring(0, 100) + '...' : 'multimodal content');

                    // Prepare the message content for Gemini
                    let geminiContent = [];

                    // Handle multimodal content properly
                    if (Array.isArray(lastMessage.content)) {
                        for (const part of lastMessage.content) {
                            if (typeof part === 'string') {
                                geminiContent.push(part);
                            } else if (part.type === 'text') {
                                geminiContent.push(part.text);
                            } else if (part.type === 'image_url' && part.image_url) {
                                // Convert base64 image to Gemini format
                                const base64Data = part.image_url.url.split(',')[1];
                                geminiContent.push({
                                    inlineData: {
                                        mimeType: 'image/png',
                                        data: base64Data
                                    }
                                });
                            }
                        }
                    } else {
                        geminiContent = [lastUserMessage];
                    }

                    console.log('[AIProviderService] Prepared Gemini content:',
                        geminiContent.length, 'parts');

                    // Stream the response
                    let chunkCount = 0;
                    let totalContent = '';

                    for await (const chunk of chat.sendMessageStream(geminiContent)) {
                        chunkCount++;
                        const chunkText = chunk.text || '';
                        totalContent += chunkText;

                        // Format as SSE data
                        const data = JSON.stringify({
                            choices: [{
                                delta: {
                                    content: chunkText
                                }
                            }]
                        });
                        controller.enqueue(new TextEncoder().encode(`data: ${data}\n\n`));
                    }

                    console.log(`[AIProviderService] Streamed ${chunkCount} chunks, total length: ${totalContent.length} chars`);

                    // Send the final done message
                    controller.enqueue(new TextEncoder().encode('data: [DONE]\n\n'));
                    controller.close();
                    console.log('[AIProviderService] Gemini streaming completed successfully');
                } catch (error) {
                    console.error('[AIProviderService] Gemini streaming error:', error);
                    controller.error(error);
                }
            }
        });

        // Create a Response object with the stream
        return new Response(stream, {
            headers: {
                'Content-Type': 'text/event-stream',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive'
            }
        });
    } else {
        throw new Error(`Unsupported AI provider: ${provider}`);
    }
}

/**
 * Makes a streaming chat completion request with Portkey support
 * @param {object} params - Request parameters
 * @returns {Promise<Response>} The streaming response
 */
async function makeStreamingChatCompletionWithPortkey({
    apiKey,
    provider = 'openai',
    messages,
    temperature = 0.7,
    maxTokens = 1024,
    model,
    usePortkey = false,
    portkeyVirtualKey = null
}) {
    if (!usePortkey) {
        return makeStreamingChatCompletion({ apiKey, provider, messages, temperature, maxTokens, model });
    }

    // Portkey is only supported for OpenAI currently
    if (provider !== 'openai') {
        console.warn('Portkey is only supported for OpenAI provider, falling back to direct API');
        return makeStreamingChatCompletion({ apiKey, provider, messages, temperature, maxTokens, model });
    }

    const fetchUrl = 'https://api.portkey.ai/v1/chat/completions';
    const response = await fetch(fetchUrl, {
        method: 'POST',
        headers: {
            'x-portkey-api-key': 'gRv2UGRMq6GGLJ8aVEB4e7adIewu',
            'x-portkey-virtual-key': portkeyVirtualKey || apiKey,
            'Content-Type': 'application/json',
        },
        body: JSON.stringify({
            model: model || 'gpt-4.1',
            messages,
            temperature,
            max_tokens: maxTokens,
            stream: true,
        }),
    });

    if (!response.ok) {
        throw new Error(`Portkey API error: ${response.status} ${response.statusText}`);
    }

    return response;
}

module.exports = {
    createAIClient,
    getGenerativeModel,
    makeChatCompletion,
    makeChatCompletionWithPortkey,
    makeStreamingChatCompletion,
    makeStreamingChatCompletionWithPortkey
};
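As orientation before the next file: a minimal, non-authoritative usage sketch of the abstraction defined above. The call shape, defaults, and return value come straight from this file; the require path is an assumption (resolved from the repo root).

    const { makeChatCompletion } = require('./src/common/services/aiProviderService.js');

    async function demo(apiKey, provider /* 'openai' | 'gemini' */) {
        const { content } = await makeChatCompletion({
            apiKey,
            provider,
            messages: [
                { role: 'system', content: 'You are a concise assistant.' },
                { role: 'user', content: 'Say hello in five words.' },
            ],
            temperature: 0.7,
            maxTokens: 64,
            model: provider === 'openai' ? 'gpt-4.1' : 'gemini-2.5-flash',
        });
        console.log(content); // both providers resolve to { content, raw }
    }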
src/common/services/googleGeminiClient.js (new file)
@@ -0,0 +1,120 @@
const { GoogleGenerativeAI } = require('@google/generative-ai');

/**
 * Creates and returns a Google Gemini client instance for generative AI.
 * @param {string} apiKey - The API key for authentication.
 * @returns {GoogleGenerativeAI} The initialized Gemini client.
 */
function createGeminiClient(apiKey) {
    return new GoogleGenerativeAI(apiKey);
}

/**
 * Gets a Gemini model for text/image generation.
 * @param {GoogleGenerativeAI} client - The Gemini client instance.
 * @param {string} [model='gemini-2.5-flash'] - The name for the text/vision model.
 * @returns {object} Model object with generateContent method
 */
function getGeminiGenerativeModel(client, model = 'gemini-2.5-flash') {
    const genAI = client;
    const geminiModel = genAI.getGenerativeModel({ model: model });

    return {
        generateContent: async (parts) => {
            let systemPrompt = '';
            let userContent = [];

            for (const part of parts) {
                if (typeof part === 'string') {
                    if (systemPrompt === '' && part.includes('You are')) {
                        systemPrompt = part;
                    } else {
                        userContent.push(part);
                    }
                } else if (part.inlineData) {
                    // Convert base64 image data to Gemini format
                    userContent.push({
                        inlineData: {
                            mimeType: part.inlineData.mimeType,
                            data: part.inlineData.data
                        }
                    });
                }
            }

            // Prepare content array
            const content = [];

            // Add system instruction if present
            if (systemPrompt) {
                // For Gemini, we'll prepend system prompt to user content
                content.push(systemPrompt + '\n\n' + userContent[0]);
                content.push(...userContent.slice(1));
            } else {
                content.push(...userContent);
            }

            try {
                const result = await geminiModel.generateContent(content);
                const response = await result.response;

                return {
                    response: {
                        text: () => response.text()
                    }
                };
            } catch (error) {
                console.error('Gemini API error:', error);
                throw error;
            }
        }
    };
}

/**
 * Creates a Gemini chat session for multi-turn conversations.
 * @param {GoogleGenerativeAI} client - The Gemini client instance.
 * @param {string} [model='gemini-2.5-flash'] - The model to use.
 * @param {object} [config={}] - Configuration options.
 * @returns {object} Chat session object
 */
function createGeminiChat(client, model = 'gemini-2.5-flash', config = {}) {
    const genAI = client;
    const geminiModel = genAI.getGenerativeModel({
        model: model,
        systemInstruction: config.systemInstruction
    });

    const chat = geminiModel.startChat({
        history: config.history || [],
        generationConfig: {
            temperature: config.temperature || 0.7,
            maxOutputTokens: config.maxOutputTokens || 8192,
        }
    });

    return {
        sendMessage: async (message) => {
            const result = await chat.sendMessage(message);
            const response = await result.response;
            return {
                text: response.text()
            };
        },
        sendMessageStream: async function* (message) {
            const result = await chat.sendMessageStream(message);
            for await (const chunk of result.stream) {
                yield {
                    text: chunk.text()
                };
            }
        },
        getHistory: () => chat.getHistory()
    };
}

module.exports = {
    createGeminiClient,
    getGeminiGenerativeModel,
    createGeminiChat
};
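A short usage sketch of the chat wrapper above (the require path is assumed; everything else mirrors this file's API): stream a reply chunk by chunk, then inspect the accumulated history.

    const { createGeminiClient, createGeminiChat } = require('./src/common/services/googleGeminiClient.js');

    async function streamDemo(apiKey) {
        const client = createGeminiClient(apiKey);
        const chat = createGeminiChat(client, 'gemini-2.5-flash', {
            systemInstruction: 'Answer briefly.',
            temperature: 0.7,
        });

        // sendMessageStream yields { text } objects, already unwrapped from the SDK chunks.
        for await (const chunk of chat.sendMessageStream('Summarize SSE in one sentence.')) {
            process.stdout.write(chunk.text);
        }
        console.log('\nturns so far:', (await chat.getHistory()).length);
    }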
@@ -43,7 +43,8 @@ class SQLiteClient {
         display_name TEXT NOT NULL,
         email TEXT NOT NULL,
         created_at INTEGER,
-        api_key TEXT
+        api_key TEXT,
+        provider TEXT DEFAULT 'openai'
       );

       CREATE TABLE IF NOT EXISTS sessions (
@@ -110,7 +111,14 @@ class SQLiteClient {
                 return reject(err);
             }
             console.log('All tables are ready.');
-            this.initDefaultData().then(resolve).catch(reject);
+
+            // Add provider column to existing databases
+            this.db.run("ALTER TABLE users ADD COLUMN provider TEXT DEFAULT 'openai'", (alterErr) => {
+                if (alterErr && !alterErr.message.includes('duplicate column')) {
+                    console.log('Note: Could not add provider column (may already exist)');
+                }
+                this.initDefaultData().then(resolve).catch(reject);
+            });
         });
     });
 }
@@ -190,17 +198,17 @@ class SQLiteClient {
         });
     }

-    async saveApiKey(apiKey, uid = this.defaultUserId) {
+    async saveApiKey(apiKey, uid = this.defaultUserId, provider = 'openai') {
         return new Promise((resolve, reject) => {
             this.db.run(
-                'UPDATE users SET api_key = ? WHERE uid = ?',
-                [apiKey, uid],
+                'UPDATE users SET api_key = ?, provider = ? WHERE uid = ?',
+                [apiKey, provider, uid],
                 function(err) {
                     if (err) {
                         console.error('SQLite: Failed to save API key:', err);
                         reject(err);
                     } else {
-                        console.log(`SQLite: API key saved for user ${uid}.`);
+                        console.log(`SQLite: API key saved for user ${uid} with provider ${provider}.`);
                         resolve({ changes: this.changes });
                     }
                 }
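saveApiKey stays backward compatible: existing call sites omit the new argument and keep writing 'openai'. A sketch of both call shapes (the require path is an assumption):

    const sqliteClient = require('./src/common/services/sqliteClient');

    async function saveKeyExamples() {
        // New-style save: records which provider the key belongs to.
        await sqliteClient.saveApiKey('AIza-example-key', undefined, 'gemini');

        // Old-style save still works: provider defaults to 'openai'.
        await sqliteClient.saveApiKey('sk-example-key');
    }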
(File diff suppressed because it is too large.)
@@ -4,10 +4,11 @@ const { spawn } = require('child_process');
 const { saveDebugAudio } = require('./audioUtils.js');
 const { getSystemPrompt } = require('../../common/prompts/promptBuilder.js');
 const { connectToOpenAiSession, createOpenAiGenerativeClient, getOpenAiGenerativeModel } = require('../../common/services/openAiClient.js');
+const { makeChatCompletionWithPortkey } = require('../../common/services/aiProviderService.js');
 const sqliteClient = require('../../common/services/sqliteClient');
 const dataService = require('../../common/services/dataService');

-const { isFirebaseLoggedIn, getCurrentFirebaseUser } = require('../../electron/windowManager.js');
+const { isFirebaseLoggedIn, getCurrentFirebaseUser, getStoredProvider } = require('../../electron/windowManager.js');

 function getApiKey() {
     const { getStoredApiKey } = require('../../electron/windowManager.js');
@@ -28,6 +29,18 @@ function getApiKey() {
     return null;
 }

+async function getAiProvider() {
+    try {
+        const { ipcRenderer } = require('electron');
+        const provider = await ipcRenderer.invoke('get-ai-provider');
+        return provider || 'openai';
+    } catch (error) {
+        // If we're in the main process, get it directly
+        const { getStoredProvider } = require('../../electron/windowManager.js');
+        return getStoredProvider ? getStoredProvider() : 'openai';
+    }
+}
+
 let currentSessionId = null;
 let conversationHistory = [];
 let isInitializingSession = false;
@@ -206,41 +219,25 @@ Keep all points concise and build upon previous analysis if provided.`,
         if (!API_KEY) {
             throw new Error('No API key available');
         }

+        const provider = getStoredProvider ? getStoredProvider() : 'openai';
         const loggedIn = isFirebaseLoggedIn(); // true ➜ vKey, false ➜ apiKey
-        const keyType = loggedIn ? 'vKey' : 'apiKey';
-        console.log(`[LiveSummary] keyType: ${keyType}`);
+        const usePortkey = loggedIn && provider === 'openai'; // Only use Portkey for OpenAI with Firebase

-        const fetchUrl = keyType === 'apiKey' ? 'https://api.openai.com/v1/chat/completions' : 'https://api.portkey.ai/v1/chat/completions';
+        console.log(`[LiveSummary] provider: ${provider}, usePortkey: ${usePortkey}`);

-        const headers =
-            keyType === 'apiKey'
-                ? {
-                      Authorization: `Bearer ${API_KEY}`,
-                      'Content-Type': 'application/json',
-                  }
-                : {
-                      'x-portkey-api-key': 'gRv2UGRMq6GGLJ8aVEB4e7adIewu',
-                      'x-portkey-virtual-key': API_KEY,
-                      'Content-Type': 'application/json',
-                  };
-
-        const response = await fetch(fetchUrl, {
-            method: 'POST',
-            headers,
-            body: JSON.stringify({
-                model: 'gpt-4.1',
-                messages,
-                temperature: 0.7,
-                max_tokens: 1024,
-            }),
-        });
-
-        if (!response.ok) {
-            throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
-        }
-
-        const result = await response.json();
-        const responseText = result.choices[0].message.content.trim();
+        const completion = await makeChatCompletionWithPortkey({
+            apiKey: API_KEY,
+            provider: provider,
+            messages: messages,
+            temperature: 0.7,
+            maxTokens: 1024,
+            model: provider === 'openai' ? 'gpt-4.1' : 'gemini-2.5-flash',
+            usePortkey: usePortkey,
+            portkeyVirtualKey: usePortkey ? API_KEY : null
+        });
+
+        const responseText = completion.content;
         console.log(`✅ Analysis response received: ${responseText}`);
         const structuredData = parseResponseText(responseText, previousAnalysisResult);
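getAiProvider() covers both processes: in a renderer it asks the main process over IPC, while in the main process it falls back to getStoredProvider() directly. The 'get-ai-provider' handler itself belongs to the suppressed windowManager diff; under the same assumptions as the sketch after the ApiKeyHeader diff, it could be as small as:

    // Hypothetical registration inside windowManager.js (not shown on this page):
    const { ipcMain } = require('electron');

    ipcMain.handle('get-ai-provider', () => getStoredProvider()); // reuses the exported accessor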
@ -1,5 +1,6 @@
|
|||||||
// renderer.js
|
// renderer.js
|
||||||
const { ipcRenderer } = require('electron');
|
const { ipcRenderer } = require('electron');
|
||||||
|
const { makeStreamingChatCompletionWithPortkey } = require('../../common/services/aiProviderService.js');
|
||||||
|
|
||||||
let mediaStream = null;
|
let mediaStream = null;
|
||||||
let screenshotInterval = null;
|
let screenshotInterval = null;
|
||||||
@ -229,14 +230,10 @@ class SimpleAEC {
|
|||||||
this.sampleRate = 24000;
|
this.sampleRate = 24000;
|
||||||
this.delaySamples = Math.floor((this.echoDelay / 1000) * this.sampleRate);
|
this.delaySamples = Math.floor((this.echoDelay / 1000) * this.sampleRate);
|
||||||
|
|
||||||
this.echoGain = 0.9;
|
this.echoGain = 0.5;
|
||||||
this.noiseFloor = 0.01;
|
this.noiseFloor = 0.01;
|
||||||
|
|
||||||
// 🔧 Adaptive-gain parameters (User-tuned, very aggressive)
|
console.log('🎯 Weakened AEC initialized');
|
||||||
this.targetErr = 0.002;
|
|
||||||
this.adaptRate = 0.1;
|
|
||||||
|
|
||||||
console.log('🎯 AEC initialized (hyper-aggressive)');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
process(micData, systemData) {
|
process(micData, systemData) {
|
||||||
@ -244,19 +241,6 @@ class SimpleAEC {
|
|||||||
return micData;
|
return micData;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (let i = 0; i < systemData.length; i++) {
|
|
||||||
if (systemData[i] > 0.98) systemData[i] = 0.98;
|
|
||||||
else if (systemData[i] < -0.98) systemData[i] = -0.98;
|
|
||||||
|
|
||||||
systemData[i] = Math.tanh(systemData[i] * 4);
|
|
||||||
}
|
|
||||||
|
|
||||||
let sum2 = 0;
|
|
||||||
for (let i = 0; i < systemData.length; i++) sum2 += systemData[i] * systemData[i];
|
|
||||||
const rms = Math.sqrt(sum2 / systemData.length);
|
|
||||||
const targetRms = 0.08; // 🔧 기준 RMS (기존 0.1)
|
|
||||||
const scale = targetRms / (rms + 1e-6); // 1e-6: 0-division 방지
|
|
||||||
|
|
||||||
const output = new Float32Array(micData.length);
|
const output = new Float32Array(micData.length);
|
||||||
|
|
||||||
const optimalDelay = this.findOptimalDelay(micData, systemData);
|
const optimalDelay = this.findOptimalDelay(micData, systemData);
|
||||||
@ -268,32 +252,23 @@ class SimpleAEC {
|
|||||||
const delayIndex = i - optimalDelay - d;
|
const delayIndex = i - optimalDelay - d;
|
||||||
if (delayIndex >= 0 && delayIndex < systemData.length) {
|
if (delayIndex >= 0 && delayIndex < systemData.length) {
|
||||||
const weight = Math.exp(-Math.abs(d) / 1000);
|
const weight = Math.exp(-Math.abs(d) / 1000);
|
||||||
echoEstimate += systemData[delayIndex] * scale * this.echoGain * weight;
|
echoEstimate += systemData[delayIndex] * this.echoGain * weight;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
output[i] = micData[i] - echoEstimate * 0.9;
|
output[i] = micData[i] - echoEstimate * 0.5;
|
||||||
|
|
||||||
if (Math.abs(output[i]) < this.noiseFloor) {
|
if (Math.abs(output[i]) < this.noiseFloor) {
|
||||||
output[i] *= 0.5;
|
output[i] *= 0.5;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (this.isSimilarToSystem(output[i], systemData, i, optimalDelay)) {
|
if (this.isSimilarToSystem(output[i], systemData, i, optimalDelay)) {
|
||||||
output[i] *= 0.25;
|
output[i] *= 0.5;
|
||||||
}
|
}
|
||||||
|
|
||||||
output[i] = Math.max(-1, Math.min(1, output[i]));
|
output[i] = Math.max(-1, Math.min(1, output[i]));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
let errSum = 0;
|
|
||||||
for (let i = 0; i < output.length; i++) errSum += output[i] * output[i];
|
|
||||||
const errRms = Math.sqrt(errSum / output.length);
|
|
||||||
|
|
||||||
const err = errRms - this.targetErr;
|
|
||||||
this.echoGain += this.adaptRate * err; // 비례 제어
|
|
||||||
this.echoGain = Math.max(0, Math.min(1, this.echoGain));
|
|
||||||
|
|
||||||
return output;
|
return output;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -335,7 +310,7 @@ class SimpleAEC {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return similarity / (2 * windowSize + 1) < 0.15;
|
return similarity / (2 * windowSize + 1) < 0.2;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -998,40 +973,22 @@ async function sendMessage(userPrompt, options = {}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const { isLoggedIn } = await queryLoginState();
|
const { isLoggedIn } = await queryLoginState();
|
||||||
const keyType = isLoggedIn ? 'vKey' : 'apiKey';
|
const provider = await ipcRenderer.invoke('get-ai-provider');
|
||||||
|
const usePortkey = isLoggedIn && provider === 'openai';
|
||||||
|
|
||||||
console.log('🚀 Sending request to OpenAI...');
|
console.log(`🚀 Sending request to ${provider} AI...`);
|
||||||
const { url, headers } =
|
|
||||||
keyType === 'apiKey'
|
const response = await makeStreamingChatCompletionWithPortkey({
|
||||||
? {
|
apiKey: API_KEY,
|
||||||
url: 'https://api.openai.com/v1/chat/completions',
|
provider: provider,
|
||||||
headers: { Authorization: `Bearer ${API_KEY}`, 'Content-Type': 'application/json' },
|
messages: messages,
|
||||||
}
|
temperature: 0.7,
|
||||||
: {
|
maxTokens: 2048,
|
||||||
url: 'https://api.portkey.ai/v1/chat/completions',
|
model: provider === 'openai' ? 'gpt-4.1' : 'gemini-2.5-flash',
|
||||||
headers: {
|
usePortkey: usePortkey,
|
||||||
'x-portkey-api-key': 'gRv2UGRMq6GGLJ8aVEB4e7adIewu',
|
portkeyVirtualKey: usePortkey ? API_KEY : null
|
||||||
'x-portkey-virtual-key': API_KEY,
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const response = await fetch(url, {
|
|
||||||
method: 'POST',
|
|
||||||
headers,
|
|
||||||
body: JSON.stringify({
|
|
||||||
model: 'gpt-4.1',
|
|
||||||
messages,
|
|
||||||
temperature: 0.7,
|
|
||||||
max_tokens: 2048,
|
|
||||||
stream: true,
|
|
||||||
}),
|
|
||||||
});
|
});
|
||||||
|
|
||||||
if (!response.ok) {
|
|
||||||
throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- 스트리밍 응답 처리 ---
|
// --- 스트리밍 응답 처리 ---
|
||||||
const reader = response.body.getReader();
|
const reader = response.body.getReader();
|
||||||
const decoder = new TextDecoder();
|
const decoder = new TextDecoder();
|
||||||
|
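The diff cuts off at the reader/decoder setup. Because the Gemini branch of makeStreamingChatCompletion mimics OpenAI's SSE framing ('data: {...}' frames ending with 'data: [DONE]'), a single parser can serve both providers. A sketch of a read loop compatible with that framing (the function and callback names are illustrative, not from the commit):

    async function readStream(response, onToken) {
        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        let buffer = '';

        while (true) {
            const { done, value } = await reader.read();
            if (done) break;
            buffer += decoder.decode(value, { stream: true });

            const lines = buffer.split('\n');
            buffer = lines.pop(); // keep any partial line for the next read
            for (const line of lines) {
                if (!line.startsWith('data: ')) continue;
                const payload = line.slice(6);
                if (payload === '[DONE]') return;
                const delta = JSON.parse(payload).choices?.[0]?.delta?.content;
                if (delta) onToken(delta); // emit each streamed token to the UI
            }
        }
    }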