diff --git a/gpt4all-chat/CHANGELOG.md b/gpt4all-chat/CHANGELOG.md index 47926f050b75..c774a04089b3 100644 --- a/gpt4all-chat/CHANGELOG.md +++ b/gpt4all-chat/CHANGELOG.md @@ -23,8 +23,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Set the window icon on Linux ([#2880](https://github.com/nomic-ai/gpt4all/pull/2880)) - Corrections to the Romanian translation (by [@SINAPSA-IC](https://github.com/SINAPSA-IC) in [#2890](https://github.com/nomic-ai/gpt4all/pull/2890)) - Fix singular/plural forms of LocalDocs "x Sources" (by [@cosmic-snow](https://github.com/cosmic-snow) in [#2885](https://github.com/nomic-ai/gpt4all/pull/2885)) -- Fixed typo in several files. (by [@3Simplex](https://github.com/3Simplex) in [#2916](https://github.com/nomic-ai/gpt4all/pull/2916)) +- Fix a typo in Model Settings (by [@3Simplex](https://github.com/3Simplex) in [#2916](https://github.com/nomic-ai/gpt4all/pull/2916)) - Fix the antenna icon tooltip when using the local server ([#2922](https://github.com/nomic-ai/gpt4all/pull/2922)) +- Fix a few issues with locating files and handling errors when loading remote models on startup ([#2875](https://github.com/nomic-ai/gpt4all/pull/2875)) ## [3.2.1] - 2024-08-13 diff --git a/gpt4all-chat/src/modellist.cpp b/gpt4all-chat/src/modellist.cpp index 13711057242b..d53c8fbfa316 100644 --- a/gpt4all-chat/src/modellist.cpp +++ b/gpt4all-chat/src/modellist.cpp @@ -1208,132 +1208,139 @@ bool ModelList::modelExists(const QString &modelFilename) const return false; } -void ModelList::updateModelsFromDirectory() +void ModelList::updateOldRemoteModels(const QString &path) { - const QString exePath = QCoreApplication::applicationDirPath() + QDir::separator(); - const QString localPath = MySettings::globalInstance()->modelPath(); + QDirIterator it(path, QDir::Files, QDirIterator::Subdirectories); + while (it.hasNext()) { + QFileInfo info = it.nextFileInfo(); + QString filename = it.fileName(); + if 
(!filename.startsWith("chatgpt-") || !filename.endsWith(".txt")) + continue; - auto updateOldRemoteModels = [&](const QString& path) { - QDirIterator it(path, QDirIterator::Subdirectories); - while (it.hasNext()) { - it.next(); - if (!it.fileInfo().isDir()) { - QString filename = it.fileName(); - if (filename.startsWith("chatgpt-") && filename.endsWith(".txt")) { - QString apikey; - QString modelname(filename); - modelname.chop(4); // strip ".txt" extension - modelname.remove(0, 8); // strip "chatgpt-" prefix - QFile file(path + filename); - if (file.open(QIODevice::ReadWrite)) { - QTextStream in(&file); - apikey = in.readAll(); - file.close(); - } + QString apikey; + QString modelname(filename); + modelname.chop(4); // strip ".txt" extension + modelname.remove(0, 8); // strip "chatgpt-" prefix + QFile file(info.filePath()); + if (!file.open(QIODevice::ReadOnly)) { + qWarning().noquote() << tr("cannot open \"%1\": %2").arg(file.fileName(), file.errorString()); + continue; + } - QJsonObject obj; - obj.insert("apiKey", apikey); - obj.insert("modelName", modelname); - QJsonDocument doc(obj); - - auto newfilename = u"gpt4all-%1.rmodel"_s.arg(modelname); - QFile newfile(path + newfilename); - if (newfile.open(QIODevice::ReadWrite)) { - QTextStream out(&newfile); - out << doc.toJson(); - newfile.close(); - } - file.remove(); - } - } + { + QTextStream in(&file); + apikey = in.readAll(); + file.close(); } - }; - auto processDirectory = [&](const QString& path) { - QDirIterator it(path, QDir::Files, QDirIterator::Subdirectories); - while (it.hasNext()) { - it.next(); + QFile newfile(u"%1/gpt4all-%2.rmodel"_s.arg(info.dir().path(), modelname)); + if (!newfile.open(QIODevice::ReadWrite)) { + qWarning().noquote() << tr("cannot create \"%1\": %2").arg(newfile.fileName(), file.errorString()); + continue; + } - QString filename = it.fileName(); - if (filename.startsWith("incomplete") || FILENAME_BLACKLIST.contains(filename)) - continue; - if (!filename.endsWith(".gguf") && 
!filename.endsWith(".rmodel")) - continue; + QJsonObject obj { + { "apiKey", apikey }, + { "modelName", modelname }, + }; - QVector modelsById; - { - QMutexLocker locker(&m_mutex); - for (ModelInfo *info : m_models) - if (info->filename() == filename) - modelsById.append(info->id()); - } + QTextStream out(&newfile); + out << QJsonDocument(obj).toJson(); + newfile.close(); - if (modelsById.isEmpty()) { - if (!contains(filename)) - addModel(filename); - modelsById.append(filename); - } + file.remove(); + } +} - QFileInfo info = it.fileInfo(); +void ModelList::processModelDirectory(const QString &path) +{ + QDirIterator it(path, QDir::Files, QDirIterator::Subdirectories); + while (it.hasNext()) { + QFileInfo info = it.nextFileInfo(); - bool isOnline(filename.endsWith(".rmodel")); - bool isCompatibleApi(filename.endsWith("-capi.rmodel")); + QString filename = it.fileName(); + if (filename.startsWith("incomplete") || FILENAME_BLACKLIST.contains(filename)) + continue; + if (!filename.endsWith(".gguf") && !filename.endsWith(".rmodel")) + continue; - QString name; - QString description; - if (isCompatibleApi) { - QJsonObject obj; - { - QFile file(path + filename); - bool success = file.open(QIODeviceBase::ReadOnly); - (void)success; - Q_ASSERT(success); - QJsonDocument doc = QJsonDocument::fromJson(file.readAll()); - obj = doc.object(); - } - { - QString apiKey(obj["apiKey"].toString()); - QString baseUrl(obj["baseUrl"].toString()); - QString modelName(obj["modelName"].toString()); - apiKey = apiKey.length() < 10 ? "*****" : apiKey.left(5) + "*****"; - name = tr("%1 (%2)").arg(modelName, baseUrl); - description = tr("OpenAI-Compatible API Model
" - "") - .arg(apiKey, baseUrl, modelName); + bool isOnline(filename.endsWith(".rmodel")); + bool isCompatibleApi(filename.endsWith("-capi.rmodel")); + + QString name; + QString description; + if (isCompatibleApi) { + QJsonObject obj; + { + QFile file(info.filePath()); + if (!file.open(QIODeviceBase::ReadOnly)) { + qWarning().noquote() << tr("cannot open \"%1\": %2").arg(file.fileName(), file.errorString()); + continue; } + QJsonDocument doc = QJsonDocument::fromJson(file.readAll()); + obj = doc.object(); + } + { + QString apiKey(obj["apiKey"].toString()); + QString baseUrl(obj["baseUrl"].toString()); + QString modelName(obj["modelName"].toString()); + apiKey = apiKey.length() < 10 ? "*****" : apiKey.left(5) + "*****"; + name = tr("%1 (%2)").arg(modelName, baseUrl); + description = tr("OpenAI-Compatible API Model
" + "") + .arg(apiKey, baseUrl, modelName); } + } - for (const QString &id : modelsById) { - QVector> data { - { InstalledRole, true }, - { FilenameRole, filename }, - { OnlineRole, isOnline }, - { CompatibleApiRole, isCompatibleApi }, - { DirpathRole, info.dir().absolutePath() + "/" }, - { FilesizeRole, toFileSize(info.size()) }, - }; - if (isCompatibleApi) { - // The data will be saved to "GPT4All.ini". - data.append({ NameRole, name }); - // The description is hard-coded into "GPT4All.ini" due to performance issue. - // If the description goes to be dynamic from its .rmodel file, it will get high I/O usage while using the ModelList. - data.append({ DescriptionRole, description }); - // Prompt template should be clear while using ChatML format which is using in most of OpenAI-Compatible API server. - data.append({ PromptTemplateRole, "%1" }); - } - updateData(id, data); + QVector modelsById; + { + QMutexLocker locker(&m_mutex); + for (ModelInfo *info : m_models) + if (info->filename() == filename) + modelsById.append(info->id()); + } + + if (modelsById.isEmpty()) { + if (!contains(filename)) + addModel(filename); + modelsById.append(filename); + } + + for (const QString &id : modelsById) { + QVector> data { + { InstalledRole, true }, + { FilenameRole, filename }, + { OnlineRole, isOnline }, + { CompatibleApiRole, isCompatibleApi }, + { DirpathRole, info.dir().absolutePath() + "/" }, + { FilesizeRole, toFileSize(info.size()) }, + }; + if (isCompatibleApi) { + // The data will be saved to "GPT4All.ini". + data.append({ NameRole, name }); + // The description is hard-coded into "GPT4All.ini" due to performance issue. + // If the description goes to be dynamic from its .rmodel file, it will get high I/O usage while using the ModelList. + data.append({ DescriptionRole, description }); + // Prompt template should be clear while using ChatML format which is using in most of OpenAI-Compatible API server. 
+ data.append({ PromptTemplateRole, "%1" }); } + updateData(id, data); } - }; + } +} +void ModelList::updateModelsFromDirectory() +{ + const QString exePath = QCoreApplication::applicationDirPath() + QDir::separator(); + const QString localPath = MySettings::globalInstance()->modelPath(); updateOldRemoteModels(exePath); - processDirectory(exePath); + processModelDirectory(exePath); if (localPath != exePath) { updateOldRemoteModels(localPath); - processDirectory(localPath); + processModelDirectory(localPath); } } diff --git a/gpt4all-chat/src/modellist.h b/gpt4all-chat/src/modellist.h index 7c13da8ef4fd..21d9aeefa4d5 100644 --- a/gpt4all-chat/src/modellist.h +++ b/gpt4all-chat/src/modellist.h @@ -502,6 +502,8 @@ private Q_SLOTS: void parseModelsJsonFile(const QByteArray &jsonData, bool save); void parseDiscoveryJsonFile(const QByteArray &jsonData); QString uniqueModelName(const ModelInfo &model) const; + void updateOldRemoteModels(const QString &path); + void processModelDirectory(const QString &path); private: mutable QMutex m_mutex; diff --git a/gpt4all-chat/translations/gpt4all_en_US.ts b/gpt4all-chat/translations/gpt4all_en_US.ts index be7dee3fe267..e46ae94cc7ed 100644 --- a/gpt4all-chat/translations/gpt4all_en_US.ts +++ b/gpt4all-chat/translations/gpt4all_en_US.ts @@ -4,62 +4,62 @@ AddCollectionView - + ← Existing Collections - + Add Document Collection - + Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings. - + Please choose a directory - + Name - + Collection name... - + Name of the collection to add (Required) - + Folder - + Folder path... - + Folder path to documents (Required) - + Browse - + Create Collection @@ -67,288 +67,288 @@ AddModelView - + ← Existing Models - + Explore Models - + Discover and download models by keyword search... 
- + Text field for discovering and filtering downloadable models - + Initiate model discovery and filtering - + Triggers discovery and filtering of models - + Default - + Likes - + Downloads - + Recent - + Asc - + Desc - + None - + Searching · %1 - + Sort by: %1 - + Sort dir: %1 - + Limit: %1 - + Network error: could not retrieve %1 - - + + Busy indicator - + Displayed when the models request is ongoing - + Model file - + Model file to be downloaded - + Description - + File description - + Cancel - + Resume - + Download - + Stop/restart/start the download - + Remove - + Remove model from filesystem - - + + Install - + Install online model - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> - + ERROR: $API_KEY is empty. - + ERROR: $BASE_URL is empty. - + enter $BASE_URL - + ERROR: $MODEL_NAME is empty. - + enter $MODEL_NAME - + %1 GB - - + + ? - + Describes an error that occurred when downloading - + <strong><font size="1"><a href="#error">Error</a></strong></font> - + Error for incompatible hardware - + Download progressBar - + Shows the progress made in the download - + Download speed - + Download speed in bytes/kilobytes/megabytes per second - + Calculating... - - - - + + + + Whether the file hash is being calculated - + Displayed when the file hash is being calculated - + enter $API_KEY - + File size - + RAM required - + Parameters - + Quant - + Type @@ -356,22 +356,22 @@ ApplicationSettings - + Application - + Network dialog - + opt-in to share feedback/conversations - + ERROR: Update system could not find the MaintenanceTool used<br> to check for updates!<br><br> Did you install this application using the online installer? If so,<br> @@ -382,223 +382,223 @@ - + Error dialog - + Application Settings - + General - + Theme - + The application color scheme. - + Dark - + Light - + LegacyDark - + Font Size - + The size of text in the application. 
- + Small - + Medium - + Large - + Language and Locale - + The language and locale you wish to use. - + System Locale - + Device - + The compute device used for text generation. - - + + Application default - + Default Model - + The preferred model for new chats. Also used as the local server fallback. - + Suggestion Mode - + Generate suggested follow-up questions at the end of responses. - + When chatting with LocalDocs - + Whenever possible - + Never - + Download Path - + Where to store local models and the LocalDocs database. - + Browse - + Choose where to save model files - + Enable Datalake - + Send chats and feedback to the GPT4All Open-Source Datalake. - + Advanced - + CPU Threads - + The number of CPU threads used for inference and embedding. - + Save Chat Context - + Save the chat model's state to disk for faster loading. WARNING: Uses ~2GB per chat. - + Enable Local Server - + Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage. - + API Server Port - + The port to use for the local server. Requires restart. - + Check For Updates - + Manually check for an update to GPT4All. 
- + Updates @@ -606,13 +606,13 @@ Chat - - + + New Chat - + Server Chat @@ -620,12 +620,12 @@ ChatAPIWorker - + ERROR: Network error occurred while connecting to the API server - + ChatAPIWorker::handleFinished got HTTP Error %1 %2 @@ -633,62 +633,62 @@ ChatDrawer - + Drawer - + Main navigation drawer - + + New Chat - + Create a new chat - + Select the current chat or edit the chat when in edit mode - + Edit chat name - + Save chat name - + Delete chat - + Confirm chat deletion - + Cancel chat deletion - + List of chats - + List of chats in the drawer dialog @@ -696,32 +696,32 @@ ChatListModel - + TODAY - + THIS WEEK - + THIS MONTH - + LAST SIX MONTHS - + THIS YEAR - + LAST YEAR @@ -729,215 +729,215 @@ ChatView - + <h3>Warning</h3><p>%1</p> - + Switch model dialog - + Warn the user if they switch models, then context will be erased - + Conversation copied to clipboard. - + Code copied to clipboard. - + Chat panel - + Chat panel with options - + Reload the currently loaded model - + Eject the currently loaded model - + No model installed. - + Model loading error. - + Waiting for model... - + Switching context... - + Choose a model... - + Not found: %1 - + The top item is the current model - - + + LocalDocs - + Add documents - + add collections of documents to the chat - + Load the default model - + Loads the default model which can be changed in settings - + No Model Installed - + GPT4All requires that you install at least one model to get started - + Install a Model - + Shows the add model view - + Conversation with the model - + prompt / response pairs from the conversation - + GPT4All - + You - + response stopped ... - + processing ... - + generating response ... - + generating questions ... 
- - + + Copy - + Copy Message - + Disable markdown - + Enable markdown - + Thumbs up - + Gives a thumbs up to the response - + Thumbs down - + Opens thumbs down dialog - + %n Source(s) %n Source @@ -945,113 +945,113 @@ model to get started - + Suggested follow-ups - + Erase and reset chat session - + Copy chat session to clipboard - + Redo last chat response - + Stop generating - + Stop the current response generation - + Reloads the model - + <h3>Encountered an error loading model:</h3><br><i>"%1"</i><br><br>Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:<br><ul><li>Ensure the model file has a compatible format and type<li>Check the model file is complete in the download folder<li>You can find the download folder in the settings dialog<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum<li>Read more about what models are supported in our <a href="https://docs.gpt4all.io/">documentation</a> for the gui<li>Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help - - + + Reload · %1 - + Loading · %1 - + Load · %1 (default) → - + restoring from text ... - + retrieving localdocs: %1 ... - + searching localdocs: %1 ... - + Send a message... - + Load a model to continue... - + Send messages/prompts to the model - + Cut - + Paste - + Select All - + Send message - + Sends the message/prompt contained in textfield to the model @@ -1059,12 +1059,12 @@ model to get started CollectionsDrawer - + Warning: searching collections while indexing can return incomplete results - + %n file(s) %n file @@ -1072,7 +1072,7 @@ model to get started - + %n word(s) %n word @@ -1080,17 +1080,17 @@ model to get started - + Updating - + + Add Docs - + Select a collection to make it available to the chat model. 
@@ -1098,37 +1098,37 @@ model to get started Download - + Model "%1" is installed successfully. - + ERROR: $MODEL_NAME is empty. - + ERROR: $API_KEY is empty. - + ERROR: $BASE_URL is invalid. - + ERROR: Model "%1 (%2)" is conflict. - + Model "%1 (%2)" is installed successfully. - + Model "%1" is removed. @@ -1136,92 +1136,92 @@ model to get started HomeView - + Welcome to GPT4All - + The privacy-first LLM chat application - + Start chatting - + Start Chatting - + Chat with any LLM - + LocalDocs - + Chat with your local files - + Find Models - + Explore and download models - + Latest news - + Latest news from GPT4All - + Release Notes - + Documentation - + Discord - + X (Twitter) - + Github - + nomic.ai - + Subscribe to Newsletter @@ -1229,117 +1229,117 @@ model to get started LocalDocsSettings - + LocalDocs - + LocalDocs Settings - + Indexing - + Allowed File Extensions - + Comma-separated list. LocalDocs will only attempt to process files with these extensions. - + Embedding - + Use Nomic Embed API - + Embed documents using the fast Nomic API instead of a private local model. Requires restart. - + Nomic API Key - + API key to use for Nomic Embed. Get one from the Atlas <a href="https://atlas.nomic.ai/cli-login">API keys page</a>. Requires restart. - + Embeddings Device - + The compute device used for embeddings. Requires restart. - + Application default - + Display - + Show Sources - + Display the sources used for each response. - + Advanced - + Warning: Advanced usage only. - + Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. More info <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">here</a>. - + Document snippet size (characters) - + Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation. 
- + Max document snippets per prompt - + Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation. @@ -1347,117 +1347,117 @@ model to get started LocalDocsView - + LocalDocs - + Chat with your local files - + + Add Collection - + <h3>ERROR: The LocalDocs database cannot be accessed or is not valid.</h3><br><i>Note: You will need to restart after trying any of the following suggested fixes.</i><br><ul><li>Make sure that the folder set as <b>Download Path</b> exists on the file system.</li><li>Check ownership as well as read and write permissions of the <b>Download Path</b>.</li><li>If there is a <b>localdocs_v2.db</b> file, check its ownership and read/write permissions, too.</li></ul><br>If the problem persists and there are any 'localdocs_v*.db' files present, as a last resort you can<br>try backing them up and removing them. You will have to recreate your collections, however. - + No Collections Installed - + Install a collection of local documents to get started using this feature - + + Add Doc Collection - + Shows the add model view - + Indexing progressBar - + Shows the progress made in the indexing - + ERROR - + INDEXING - + EMBEDDING - + REQUIRES UPDATE - + READY - + INSTALLING - + Indexing in progress - + Embedding in progress - + This collection requires an update after version change - + Automatically reindexes upon changes to the folder - + Installation in progress - + % - + %n file(s) %n file @@ -1465,7 +1465,7 @@ model to get started - + %n word(s) %n word @@ -1473,27 +1473,27 @@ model to get started - + Remove - + Rebuild - + Reindex this folder from scratch. This is slow and usually not needed. - + Update - + Update the collection to the new version. This is a slow operation. 
@@ -1501,67 +1501,78 @@ model to get started ModelList - + <ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with OpenAI</li><li>You can apply for an API key <a href="https://platform.openai.com/account/api-keys">here.</a></li> - + <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1 - + <strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2 - + <strong>Mistral Tiny model</strong><br> %1 - + <strong>Mistral Small model</strong><br> %1 - + <strong>Mistral Medium model</strong><br> %1 - + <br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info. - + + + cannot open "%1": %2 + + + + + cannot create "%1": %2 + + + + %1 (%2) - + <strong>OpenAI-Compatible API Model</strong><br><ul><li>API Key: %1</li><li>Base URL: %2</li><li>Model Name: %3</li></ul> - + <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> - + <ul><li>Requires personal API key and the API base URL.</li><li>WARNING: Will send your chats to the OpenAI-compatible API Server you specified!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with the OpenAI-compatible API Server</li> - + <strong>Connect to OpenAI-compatible API server</strong><br> %1 - + <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> @@ -1569,217 +1580,217 @@ model to get started ModelSettings - + Model - + Model Settings - + Clone - + Remove - + Name - + Model File - + System Prompt - + Prefixed at the beginning of every 
conversation. Must contain the appropriate framing tokens. - + Prompt Template - + The template that wraps every prompt. - + Must contain the string "%1" to be replaced with the user's input. - + Chat Name Prompt - + Prompt used to automatically generate chat names. - + Suggested FollowUp Prompt - + Prompt used to generate suggested follow-up questions. - + Context Length - + Number of input and output tokens the model sees. - + Maximum combined prompt/response tokens before information is lost. Using more context than the model was trained on will yield poor results. NOTE: Does not take effect until you reload the model. - + Temperature - + Randomness of model output. Higher -> more variation. - + Temperature increases the chances of choosing less likely tokens. NOTE: Higher temperature gives more creative but less predictable outputs. - + Top-P - + Nucleus Sampling factor. Lower -> more predictable. - + Only the most likely tokens up to a total probability of top_p can be chosen. NOTE: Prevents choosing highly unlikely tokens. - + Min-P - + Minimum token probability. Higher -> more predictable. - + Sets the minimum relative probability for a token to be considered. - + Top-K - + Size of selection pool for tokens. - + Only the top K most likely tokens will be chosen from. - + Max Length - + Maximum response length, in tokens. - + Prompt Batch Size - + The batch size used for prompt processing. - + Amount of prompt tokens to process at once. NOTE: Higher values can speed up reading prompts but will use more RAM. - + Repeat Penalty - + Repetition penalty factor. Set to 1 to disable. - + Repeat Penalty Tokens - + Number of previous tokens used for penalty. - + GPU Layers - + Number of model layers to load into VRAM. - + How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model. Lower values increase CPU load and RAM usage, and make inference slower. NOTE: Does not take effect until you reload the model. 
@@ -1789,217 +1800,217 @@ NOTE: Does not take effect until you reload the model. ModelsView - + No Models Installed - + Install a model to get started using GPT4All - - + + + Add Model - + Shows the add model view - + Installed Models - + Locally installed chat models - + Model file - + Model file to be downloaded - + Description - + File description - + Cancel - + Resume - + Stop/restart/start the download - + Remove - + Remove model from filesystem - - + + Install - + Install online model - + <strong><font size="1"><a href="#error">Error</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> - + ERROR: $API_KEY is empty. - + ERROR: $BASE_URL is empty. - + enter $BASE_URL - + ERROR: $MODEL_NAME is empty. - + enter $MODEL_NAME - + %1 GB - + ? - + Describes an error that occurred when downloading - + Error for incompatible hardware - + Download progressBar - + Shows the progress made in the download - + Download speed - + Download speed in bytes/kilobytes/megabytes per second - + Calculating... - - - - + + + + Whether the file hash is being calculated - + Busy indicator - + Displayed when the file hash is being calculated - + enter $API_KEY - + File size - + RAM required - + Parameters - + Quant - + Type @@ -2007,12 +2018,12 @@ NOTE: Does not take effect until you reload the model. MyFancyLink - + Fancy link - + A stylized link @@ -2020,7 +2031,7 @@ NOTE: Does not take effect until you reload the model. MySettingsStack - + Please choose a directory @@ -2028,12 +2039,12 @@ NOTE: Does not take effect until you reload the model. MySettingsTab - + Restore Defaults - + Restores settings dialog to a default state @@ -2041,12 +2052,12 @@ NOTE: Does not take effect until you reload the model. NetworkDialog - + Contribute data to the GPT4All Opensource Datalake. 
- + By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake. @@ -2055,47 +2066,47 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O - + Terms for opt-in - + Describes what will happen when you opt-in - + Please provide a name for attribution (optional) - + Attribution (optional) - + Provide attribution - + Enable - + Enable opt-in - + Cancel - + Cancel opt-in @@ -2103,17 +2114,17 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O NewVersionDialog - + New version is available - + Update - + Update to new version @@ -2121,17 +2132,17 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O PopupDialog - + Reveals a shortlived help balloon - + Busy indicator - + Displayed when the popup is showing busy @@ -2139,28 +2150,28 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O SettingsView - - + + Settings - + Contains various application settings - + Application - + Model - + LocalDocs @@ -2168,29 +2179,29 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O StartupDialog - + Welcome! - + ### Release notes %1### Contributors %2 - + Release notes - + Release notes for this version - + ### Opt-ins for anonymous usage analytics and datalake By enabling these features, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. @@ -2208,71 +2219,71 @@ model release that uses your data! 
- + Terms for opt-in - + Describes what will happen when you opt-in - - + + Opt-in for anonymous usage statistics - - + + Yes - + Allow opt-in for anonymous usage statistics - - + + No - + Opt-out for anonymous usage statistics - + Allow opt-out for anonymous usage statistics - - + + Opt-in for network - + Allow opt-in for network - + Allow opt-in anonymous sharing of chats to the GPT4All Datalake - + Opt-out for network - + Allow opt-out anonymous sharing of chats to the GPT4All Datalake @@ -2280,23 +2291,23 @@ model release that uses your data! SwitchModelDialog - + <b>Warning:</b> changing the model will erase the current conversation. Do you wish to continue? - + Continue - + Continue with model loading - - + + Cancel @@ -2304,32 +2315,32 @@ model release that uses your data! ThumbsDownDialog - + Please edit the text below to provide a better response. (optional) - + Please provide a better response... - + Submit - + Submits the user's response - + Cancel - + Closes the response dialog @@ -2337,125 +2348,125 @@ model release that uses your data! main - + <h3>Encountered an error starting up:</h3><br><i>"Incompatible hardware detected."</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.<br><br>See here for more information: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> - + GPT4All v%1 - + <h3>Encountered an error starting up:</h3><br><i>"Inability to access settings file."</i><br><br>Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. 
Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help. - + Connection to datalake failed. - + Saving chats. - + Network dialog - + opt-in to share feedback/conversations - + Home view - + Home view of application - + Home - + Chat view - + Chat view to interact with models - + Chats - - + + Models - + Models view for installed models - - + + LocalDocs - + LocalDocs view to configure and use local docs - - + + Settings - + Settings view for application configuration - + The datalake is enabled - + Using a network model - + Server mode is enabled - + Installed models - + View of installed models diff --git a/gpt4all-chat/translations/gpt4all_es_MX.ts b/gpt4all-chat/translations/gpt4all_es_MX.ts index 0c9f516f60c5..a32901026e51 100644 --- a/gpt4all-chat/translations/gpt4all_es_MX.ts +++ b/gpt4all-chat/translations/gpt4all_es_MX.ts @@ -4,62 +4,62 @@ AddCollectionView - + ← Existing Collections ← Colecciones existentes - + Add Document Collection Agregar colección de documentos - + Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings. Agregue una carpeta que contenga archivos de texto plano, PDFs o Markdown. Configure extensiones adicionales en Configuración. - + Please choose a directory Por favor, elija un directorio - + Name Nombre - + Collection name... Nombre de la colección... - + Name of the collection to add (Required) Nombre de la colección a agregar (Requerido) - + Folder Carpeta - + Folder path... Ruta de la carpeta... - + Folder path to documents (Required) Ruta de la carpeta de documentos (Requerido) - + Browse Explorar - + Create Collection Crear colección @@ -67,288 +67,288 @@ AddModelView - + ← Existing Models ← Modelos existentes - + Explore Models Explorar modelos - + Discover and download models by keyword search... Descubre y descarga modelos mediante búsqueda por palabras clave... 
- + Text field for discovering and filtering downloadable models Campo de texto para descubrir y filtrar modelos descargables - + Initiate model discovery and filtering Iniciar descubrimiento y filtrado de modelos - + Triggers discovery and filtering of models Activa el descubrimiento y filtrado de modelos - + Default Predeterminado - + Likes Me gusta - + Downloads Descargas - + Recent Reciente - + Asc Asc - + Desc Desc - + None Ninguno - + Searching · %1 Buscando · %1 - + Sort by: %1 Ordenar por: %1 - + Sort dir: %1 Dirección de ordenamiento: %1 - + Limit: %1 Límite: %1 - + Network error: could not retrieve %1 Error de red: no se pudo recuperar %1 - - + + Busy indicator Indicador de ocupado - + Displayed when the models request is ongoing Se muestra cuando la solicitud de modelos está en curso - + Model file Archivo del modelo - + Model file to be downloaded Archivo del modelo a descargar - + Description Descripción - + File description Descripción del archivo - + Cancel Cancelar - + Resume Reanudar - + Download Descargar - + Stop/restart/start the download Detener/reiniciar/iniciar la descarga - + Remove Eliminar - + Remove model from filesystem Eliminar modelo del sistema de archivos - - + + Install Instalar - + Install online model Instalar modelo en línea - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">Error</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">ADVERTENCIA: No recomendado para tu hardware. El modelo requiere más memoria (%1 GB) de la que tu sistema tiene disponible (%2).</strong></font> - + %1 GB %1 GB - - + + ? ? 
- + Describes an error that occurred when downloading Describe un error que ocurrió durante la descarga - + Error for incompatible hardware Error por hardware incompatible - + Download progressBar Barra de progreso de descarga - + Shows the progress made in the download Muestra el progreso realizado en la descarga - + Download speed Velocidad de descarga - + Download speed in bytes/kilobytes/megabytes per second Velocidad de descarga en bytes/kilobytes/megabytes por segundo - + Calculating... Calculando... - - - - + + + + Whether the file hash is being calculated Si se está calculando el hash del archivo - + Displayed when the file hash is being calculated Se muestra cuando se está calculando el hash del archivo - + enter $API_KEY ingrese $API_KEY - + File size Tamaño del archivo - + RAM required RAM requerida - + Parameters Parámetros - + Quant Cuantificación - + Type Tipo - + ERROR: $API_KEY is empty. ERROR: $API_KEY está vacío. - + ERROR: $BASE_URL is empty. ERROR: $BASE_URL está vacío. - + enter $BASE_URL ingrese $BASE_URL - + ERROR: $MODEL_NAME is empty. ERROR: $MODEL_NAME está vacío. - + enter $MODEL_NAME ingrese $MODEL_NAME @@ -356,22 +356,22 @@ ApplicationSettings - + Application Aplicación - + Network dialog Diálogo de red - + opt-in to share feedback/conversations optar por compartir comentarios/conversaciones - + ERROR: Update system could not find the MaintenanceTool used<br> to check for updates!<br><br> Did you install this application using the online installer? If so,<br> @@ -382,57 +382,57 @@ - + Error dialog Diálogo de error - + Application Settings Configuración de la aplicación - + General General - + Theme Tema - + The application color scheme. El esquema de colores de la aplicación. - + Dark Oscuro - + Light Claro - + LegacyDark Oscuro legado - + Font Size Tamaño de fuente - + The size of text in the application. El tamaño del texto en la aplicación. 
- + Device Dispositivo @@ -441,127 +441,127 @@ El dispositivo de cómputo utilizado para la generación de texto. "Auto" utiliza Vulkan o Metal. - + Small Pequeño - + Medium Mediano - + Large Grande - + Language and Locale Idioma y configuración regional - + The language and locale you wish to use. El idioma y la configuración regional que deseas usar. - + Default Model Modelo predeterminado - + The preferred model for new chats. Also used as the local server fallback. El modelo preferido para nuevos chats. También se utiliza como respaldo del servidor local. - + Suggestion Mode Modo de sugerencia - + Generate suggested follow-up questions at the end of responses. Generar preguntas de seguimiento sugeridas al final de las respuestas. - + When chatting with LocalDocs Al chatear con LocalDocs - + Whenever possible Siempre que sea posible - + Never Nunca - + Download Path Ruta de descarga - + Where to store local models and the LocalDocs database. Dónde almacenar los modelos locales y la base de datos de LocalDocs. - + Browse Explorar - + Choose where to save model files Elegir dónde guardar los archivos del modelo - + Enable Datalake Habilitar Datalake - + Send chats and feedback to the GPT4All Open-Source Datalake. Enviar chats y comentarios al Datalake de código abierto de GPT4All. - + Advanced Avanzado - + CPU Threads Hilos de CPU - + The number of CPU threads used for inference and embedding. El número de hilos de CPU utilizados para inferencia e incrustación. - + Save Chat Context Guardar contexto del chat - + Save the chat model's state to disk for faster loading. WARNING: Uses ~2GB per chat. Guardar el estado del modelo de chat en el disco para una carga más rápida. ADVERTENCIA: Usa ~2GB por chat. - + Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage. Exponer un servidor compatible con OpenAI a localhost. ADVERTENCIA: Resulta en un mayor uso de recursos. 
- + Enable Local Server Habilitar servidor local @@ -572,43 +572,43 @@ en un mayor uso de recursos. - + API Server Port Puerto del servidor API - + The port to use for the local server. Requires restart. El puerto a utilizar para el servidor local. Requiere reinicio. - + Check For Updates Buscar actualizaciones - + Manually check for an update to GPT4All. Buscar manualmente una actualización para GPT4All. - + Updates Actualizaciones - + System Locale Regional del sistema - + The compute device used for text generation. El dispositivo de cómputo utilizado para la generación de texto. - - + + Application default Predeterminado de la aplicación @@ -632,13 +632,13 @@ Chat - - + + New Chat Nuevo chat - + Server Chat Chat del servidor @@ -646,12 +646,12 @@ ChatAPIWorker - + ERROR: Network error occurred while connecting to the API server ERROR: Ocurrió un error de red al conectar con el servidor API - + ChatAPIWorker::handleFinished got HTTP Error %1 %2 ChatAPIWorker::handleFinished obtuvo Error HTTP %1 %2 @@ -659,62 +659,62 @@ ChatDrawer - + Drawer Cajón - + Main navigation drawer Cajón de navegación principal - + + New Chat + Nuevo chat - + Create a new chat Crear un nuevo chat - + Select the current chat or edit the chat when in edit mode Seleccionar el chat actual o editar el chat cuando esté en modo de edición - + Edit chat name Editar nombre del chat - + Save chat name Guardar nombre del chat - + Delete chat Eliminar chat - + Confirm chat deletion Confirmar eliminación del chat - + Cancel chat deletion Cancelar eliminación del chat - + List of chats Lista de chats - + List of chats in the drawer dialog Lista de chats en el diálogo del cajón @@ -722,32 +722,32 @@ ChatListModel - + TODAY HOY - + THIS WEEK ESTA SEMANA - + THIS MONTH ESTE MES - + LAST SIX MONTHS ÚLTIMOS SEIS MESES - + THIS YEAR ESTE AÑO - + LAST YEAR AÑO PASADO @@ -755,148 +755,148 @@ ChatView - + <h3>Warning</h3><p>%1</p> <h3>Advertencia</h3><p>%1</p> - + Switch model dialog Diálogo para cambiar de 
modelo - + Warn the user if they switch models, then context will be erased Advertir al usuario si cambia de modelo, entonces se borrará el contexto - + Conversation copied to clipboard. Conversación copiada al portapapeles. - + Code copied to clipboard. Código copiado al portapapeles. - + Chat panel Panel de chat - + Chat panel with options Panel de chat con opciones - + Reload the currently loaded model Recargar el modelo actualmente cargado - + Eject the currently loaded model Expulsar el modelo actualmente cargado - + No model installed. No hay modelo instalado. - + Model loading error. Error al cargar el modelo. - + Waiting for model... Esperando al modelo... - + Switching context... Cambiando contexto... - + Choose a model... Elige un modelo... - + Not found: %1 No encontrado: %1 - + The top item is the current model El elemento superior es el modelo actual - - + + LocalDocs DocumentosLocales - + Add documents Agregar documentos - + add collections of documents to the chat agregar colecciones de documentos al chat - + Load the default model Cargar el modelo predeterminado - + Loads the default model which can be changed in settings Carga el modelo predeterminado que se puede cambiar en la configuración - + No Model Installed No hay modelo instalado - + <h3>Encountered an error loading model:</h3><br><i>"%1"</i><br><br>Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:<br><ul><li>Ensure the model file has a compatible format and type<li>Check the model file is complete in the download folder<li>You can find the download folder in the settings dialog<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum<li>Read more about what models are supported in our <a href="https://docs.gpt4all.io/">documentation</a> for the gui<li>Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help <h3>Se encontró un error al cargar el modelo:</h3><br><i>"%1"</i><br><br>Los fallos en la carga de modelos pueden ocurrir por varias razones, pero las causas más comunes incluyen un formato de archivo incorrecto, una descarga incompleta o corrupta, un tipo de archivo equivocado, RAM del sistema insuficiente o un tipo de modelo incompatible. Aquí hay algunas sugerencias para resolver el problema:<br><ul><li>Asegúrate de que el archivo del modelo tenga un formato y tipo compatibles<li>Verifica que el archivo del modelo esté completo en la carpeta de descargas<li>Puedes encontrar la carpeta de descargas en el diálogo de configuración<li>Si has cargado el modelo manualmente, asegúrate de que el archivo no esté corrupto verificando el md5sum<li>Lee más sobre qué modelos son compatibles en nuestra <a href="https://docs.gpt4all.io/">documentación</a> para la interfaz gráfica<li>Visita nuestro <a href="https://discord.gg/4M2QFmTt2k">canal de discord</a> para obtener ayuda - + Install a Model Instalar un modelo - + Shows the add model view Muestra la vista de agregar modelo - + Conversation with the model Conversación con el modelo - + prompt / response pairs from the conversation pares de pregunta / respuesta de la conversación - + GPT4All GPT4All - + You @@ -905,129 +905,129 @@ recalculando contexto ... - + response stopped ... respuesta detenida ... - + processing ... procesando ... - + generating response ... generando respuesta ... 
- + generating questions ... generando preguntas ... - - + + Copy Copiar - + Copy Message Copiar mensaje - + Disable markdown Desactivar markdown - + Enable markdown Activar markdown - + Thumbs up Me gusta - + Gives a thumbs up to the response Da un me gusta a la respuesta - + Thumbs down No me gusta - + Opens thumbs down dialog Abre el diálogo de no me gusta - + Suggested follow-ups Seguimientos sugeridos - + Erase and reset chat session Borrar y reiniciar sesión de chat - + Copy chat session to clipboard Copiar sesión de chat al portapapeles - + Redo last chat response Rehacer última respuesta del chat - + Stop generating Detener generación - + Stop the current response generation Detener la generación de la respuesta actual - + Reloads the model Recarga el modelo - - + + Reload · %1 Recargar · %1 - + Loading · %1 Cargando · %1 - + Load · %1 (default) → Cargar · %1 (predeterminado) → - + retrieving localdocs: %1 ... recuperando documentos locales: %1 ... - + searching localdocs: %1 ... buscando en documentos locales: %1 ... - + %n Source(s) %n Fuente @@ -1035,47 +1035,47 @@ - + Send a message... Enviar un mensaje... - + Load a model to continue... Carga un modelo para continuar... - + Send messages/prompts to the model Enviar mensajes/indicaciones al modelo - + Cut Cortar - + Paste Pegar - + Select All Seleccionar todo - + Send message Enviar mensaje - + Sends the message/prompt contained in textfield to the model Envía el mensaje/indicación contenido en el campo de texto al modelo - + GPT4All requires that you install at least one model to get started GPT4All requiere que instale al menos un @@ -1083,7 +1083,7 @@ modelo para comenzar - + restoring from text ... restaurando desde texto ... 
@@ -1091,12 +1091,12 @@ modelo para comenzar CollectionsDrawer - + Warning: searching collections while indexing can return incomplete results Advertencia: buscar en colecciones mientras se indexan puede devolver resultados incompletos - + %n file(s) %n archivo @@ -1104,7 +1104,7 @@ modelo para comenzar - + %n word(s) %n palabra @@ -1112,17 +1112,17 @@ modelo para comenzar - + Updating Actualizando - + + Add Docs + Agregar documentos - + Select a collection to make it available to the chat model. Seleccione una colección para hacerla disponible al modelo de chat. @@ -1130,37 +1130,37 @@ modelo para comenzar Download - + Model "%1" is installed successfully. El modelo "%1" se ha instalado correctamente. - + ERROR: $MODEL_NAME is empty. ERROR: $MODEL_NAME está vacío. - + ERROR: $API_KEY is empty. ERROR: $API_KEY está vacía. - + ERROR: $BASE_URL is invalid. ERROR: $BASE_URL no es válida. - + ERROR: Model "%1 (%2)" is conflict. ERROR: El modelo "%1 (%2)" está en conflicto. - + Model "%1 (%2)" is installed successfully. El modelo "%1 (%2)" se ha instalado correctamente. - + Model "%1" is removed. El modelo "%1" ha sido eliminado. 
@@ -1168,92 +1168,92 @@ modelo para comenzar HomeView - + Welcome to GPT4All Bienvenido a GPT4All - + The privacy-first LLM chat application La aplicación de chat LLM que prioriza la privacidad - + Start chatting Comenzar a chatear - + Start Chatting Iniciar chat - + Chat with any LLM Chatear con cualquier LLM - + LocalDocs DocumentosLocales - + Chat with your local files Chatear con tus archivos locales - + Find Models Buscar modelos - + Explore and download models Explorar y descargar modelos - + Latest news Últimas noticias - + Latest news from GPT4All Últimas noticias de GPT4All - + Release Notes Notas de la versión - + Documentation Documentación - + Discord Discord - + X (Twitter) X (Twitter) - + Github Github - + nomic.ai nomic.ai - + Subscribe to Newsletter Suscribirse al boletín @@ -1261,22 +1261,22 @@ modelo para comenzar LocalDocsSettings - + LocalDocs DocumentosLocales - + LocalDocs Settings Configuración de DocumentosLocales - + Indexing Indexación - + Allowed File Extensions Extensiones de archivo permitidas @@ -1287,12 +1287,12 @@ modelo para comenzar archivos con estas extensiones. - + Embedding Incrustación - + Use Nomic Embed API Usar API de incrustación Nomic @@ -1303,77 +1303,77 @@ modelo para comenzar local privado. Requiere reinicio. - + Nomic API Key Clave API de Nomic - + API key to use for Nomic Embed. Get one from the Atlas <a href="https://atlas.nomic.ai/cli-login">API keys page</a>. Requires restart. Clave API para usar con Nomic Embed. Obtén una en la <a href="https://atlas.nomic.ai/cli-login">página de claves API</a> de Atlas. Requiere reinicio. - + Embeddings Device Dispositivo de incrustaciones - + The compute device used for embeddings. Requires restart. El dispositivo de cómputo utilizado para las incrustaciones. Requiere reinicio. - + Comma-separated list. LocalDocs will only attempt to process files with these extensions. Lista separada por comas. LocalDocs solo intentará procesar archivos con estas extensiones. 
- + Embed documents using the fast Nomic API instead of a private local model. Requires restart. Incrustar documentos usando la API rápida de Nomic en lugar de un modelo local privado. Requiere reinicio. - + Application default Predeterminado de la aplicación - + Display Visualización - + Show Sources Mostrar fuentes - + Display the sources used for each response. Mostrar las fuentes utilizadas para cada respuesta. - + Advanced Avanzado - + Warning: Advanced usage only. Advertencia: Solo para uso avanzado. - + Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. More info <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">here</a>. Valores demasiado grandes pueden causar fallos en localdocs, respuestas extremadamente lentas o falta de respuesta. En términos generales, los {N caracteres x N fragmentos} se añaden a la ventana de contexto del modelo. Más información <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">aquí</a>. - + Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation. Número de caracteres por fragmento de documento. Números más grandes aumentan la probabilidad de respuestas verídicas, pero también resultan en una generación más lenta. - + Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation. Máximo de N mejores coincidencias de fragmentos de documentos recuperados para añadir al contexto del prompt. Números más grandes aumentan la probabilidad de respuestas verídicas, pero también resultan en una generación más lenta. @@ -1383,12 +1383,12 @@ modelo para comenzar Valores demasiado grandes pueden causar fallos en documentos locales, respuestas extremadamente lentas o falta de respuesta. 
En términos generales, los {N caracteres x N fragmentos} se agregan a la ventana de contexto del modelo. Más información <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">aquí</a>. - + Document snippet size (characters) Tamaño del fragmento de documento (caracteres) - + Max document snippets per prompt Máximo de fragmentos de documento por indicación @@ -1396,17 +1396,17 @@ modelo para comenzar LocalDocsView - + LocalDocs DocumentosLocales - + Chat with your local files Chatea con tus archivos locales - + + Add Collection + Agregar colección @@ -1415,97 +1415,97 @@ modelo para comenzar ERROR: La base de datos de DocumentosLocales no es válida. - + No Collections Installed No hay colecciones instaladas - + Install a collection of local documents to get started using this feature Instala una colección de documentos locales para comenzar a usar esta función - + + Add Doc Collection + Agregar colección de documentos - + Shows the add model view Muestra la vista de agregar modelo - + Indexing progressBar Barra de progreso de indexación - + Shows the progress made in the indexing Muestra el progreso realizado en la indexación - + ERROR ERROR - + INDEXING INDEXANDO - + EMBEDDING INCRUSTANDO - + REQUIRES UPDATE REQUIERE ACTUALIZACIÓN - + READY LISTO - + INSTALLING INSTALANDO - + Indexing in progress Indexación en progreso - + Embedding in progress Incrustación en progreso - + This collection requires an update after version change Esta colección requiere una actualización después del cambio de versión - + Automatically reindexes upon changes to the folder Reindexación automática al cambiar la carpeta - + Installation in progress Instalación en progreso - + % % - + %n file(s) %n archivo @@ -1513,7 +1513,7 @@ modelo para comenzar - + %n word(s) %n palabra @@ -1521,32 +1521,32 @@ modelo para comenzar - + Remove Eliminar - + Rebuild Reconstruir - + Reindex this folder from scratch. This is slow and usually not needed. Reindexar esta carpeta desde cero. 
Esto es lento y generalmente no es necesario. - + Update Actualizar - + Update the collection to the new version. This is a slow operation. Actualizar la colección a la nueva versión. Esta es una operación lenta. - + <h3>ERROR: The LocalDocs database cannot be accessed or is not valid.</h3><br><i>Note: You will need to restart after trying any of the following suggested fixes.</i><br><ul><li>Make sure that the folder set as <b>Download Path</b> exists on the file system.</li><li>Check ownership as well as read and write permissions of the <b>Download Path</b>.</li><li>If there is a <b>localdocs_v2.db</b> file, check its ownership and read/write permissions, too.</li></ul><br>If the problem persists and there are any 'localdocs_v*.db' files present, as a last resort you can<br>try backing them up and removing them. You will have to recreate your collections, however. <h3>ERROR: No se puede acceder a la base de datos LocalDocs o no es válida.</h3><br><i>Nota: Necesitará reiniciar después de intentar cualquiera de las siguientes soluciones sugeridas.</i><br><ul><li>Asegúrese de que la carpeta establecida como <b>Ruta de Descarga</b> exista en el sistema de archivos.</li><li>Verifique la propiedad y los permisos de lectura y escritura de la <b>Ruta de Descarga</b>.</li><li>Si hay un archivo <b>localdocs_v2.db</b>, verifique también su propiedad y permisos de lectura/escritura.</li></ul><br>Si el problema persiste y hay archivos 'localdocs_v*.db' presentes, como último recurso puede<br>intentar hacer una copia de seguridad y eliminarlos. Sin embargo, tendrá que recrear sus colecciones. 
@@ -1554,7 +1554,7 @@ modelo para comenzar ModelList - + <ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with OpenAI</li><li>You can apply for an API key <a href="https://platform.openai.com/account/api-keys">here.</a></li> <ul><li>Requiere clave API personal de OpenAI.</li><li>ADVERTENCIA: ¡Enviará sus chats a OpenAI!</li><li>Su clave API se almacenará en el disco</li><li>Solo se usará para comunicarse con OpenAI</li><li>Puede solicitar una clave API <a href="https://platform.openai.com/account/api-keys">aquí.</a></li> @@ -1565,62 +1565,73 @@ modelo para comenzar OpenAI</strong><br> %1 - + <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1 <strong>Modelo ChatGPT GPT-3.5 Turbo de OpenAI</strong><br> %1 - + <br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info. <br><br><i>* Aunque pagues a OpenAI por ChatGPT-4, esto no garantiza el acceso a la clave API. Contacta a OpenAI para más información. 
- + <strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2 <strong>Modelo ChatGPT GPT-4 de OpenAI</strong><br> %1 %2 - + <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> <ul><li>Requiere una clave API personal de Mistral.</li><li>ADVERTENCIA: ¡Enviará tus chats a Mistral!</li><li>Tu clave API se almacenará en el disco</li><li>Solo se usará para comunicarse con Mistral</li><li>Puedes solicitar una clave API <a href="https://console.mistral.ai/user/api-keys">aquí</a>.</li> - + <strong>Mistral Tiny model</strong><br> %1 <strong>Modelo Mistral Tiny</strong><br> %1 - + <strong>Mistral Small model</strong><br> %1 <strong>Modelo Mistral Small</strong><br> %1 - + <strong>Mistral Medium model</strong><br> %1 <strong>Modelo Mistral Medium</strong><br> %1 - + <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> <strong>Creado por %1.</strong><br><ul><li>Publicado el %2.<li>Este modelo tiene %3 me gusta.<li>Este modelo tiene %4 descargas.<li>Más información puede encontrarse <a href="https://huggingface.co/%5">aquí.</a></ul> - + %1 (%2) %1 (%2) - + + + cannot open "%1": %2 + + + + + cannot create "%1": %2 + + + + <strong>OpenAI-Compatible API Model</strong><br><ul><li>API Key: %1</li><li>Base URL: %2</li><li>Model Name: %3</li></ul> <strong>Modelo de API compatible con OpenAI</strong><br><ul><li>Clave API: %1</li><li>URL base: %2</li><li>Nombre del modelo: %3</li></ul> - + <ul><li>Requires personal API key and the API base URL.</li><li>WARNING: Will send your chats to the OpenAI-compatible API Server you specified!</li><li>Your API key will be stored on disk</li><li>Will only be used to 
communicate with the OpenAI-compatible API Server</li> <ul><li>Requiere una clave API personal y la URL base de la API.</li><li>ADVERTENCIA: ¡Enviará sus chats al servidor de API compatible con OpenAI que especificó!</li><li>Su clave API se almacenará en el disco</li><li>Solo se utilizará para comunicarse con el servidor de API compatible con OpenAI</li> - + <strong>Connect to OpenAI-compatible API server</strong><br> %1 <strong>Conectar al servidor de API compatible con OpenAI</strong><br> %1 @@ -1628,87 +1639,87 @@ modelo para comenzar ModelSettings - + Model Modelo - + Model Settings Configuración del modelo - + Clone Clonar - + Remove Eliminar - + Name Nombre - + Model File Archivo del modelo - + System Prompt Indicación del sistema - + Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens. Prefijado al inicio de cada conversación. Debe contener los tokens de encuadre apropiados. - + Prompt Template Plantilla de indicación - + The template that wraps every prompt. La plantilla que envuelve cada indicación. - + Must contain the string "%1" to be replaced with the user's input. Debe contener la cadena "%1" para ser reemplazada con la entrada del usuario. - + Chat Name Prompt Indicación para el nombre del chat - + Prompt used to automatically generate chat names. Indicación utilizada para generar automáticamente nombres de chat. - + Suggested FollowUp Prompt Indicación de seguimiento sugerida - + Prompt used to generate suggested follow-up questions. Indicación utilizada para generar preguntas de seguimiento sugeridas. - + Context Length Longitud del contexto - + Number of input and output tokens the model sees. Número de tokens de entrada y salida que el modelo ve. @@ -1719,128 +1730,128 @@ NOTE: Does not take effect until you reload the model. NOTA: No tiene efecto hasta que recargues el modelo. - + Temperature Temperatura - + Randomness of model output. Higher -> more variation. Aleatoriedad de la salida del modelo. 
Mayor -> más variación. - + Temperature increases the chances of choosing less likely tokens. NOTE: Higher temperature gives more creative but less predictable outputs. La temperatura aumenta las probabilidades de elegir tokens menos probables. NOTA: Una temperatura más alta da resultados más creativos pero menos predecibles. - + Top-P Top-P - + Nucleus Sampling factor. Lower -> more predictable. Factor de muestreo de núcleo. Menor -> más predecible. - + Only the most likely tokens up to a total probability of top_p can be chosen. NOTE: Prevents choosing highly unlikely tokens. Solo se pueden elegir los tokens más probables hasta una probabilidad total de top_p. NOTA: Evita elegir tokens altamente improbables. - + Min-P Min-P - + Minimum token probability. Higher -> more predictable. Probabilidad mínima del token. Mayor -> más predecible. - + Sets the minimum relative probability for a token to be considered. Establece la probabilidad relativa mínima para que un token sea considerado. - + Top-K Top-K - + Size of selection pool for tokens. Tamaño del grupo de selección para tokens. - + Only the top K most likely tokens will be chosen from. Solo se elegirán los K tokens más probables. - + Max Length Longitud máxima - + Maximum response length, in tokens. Longitud máxima de respuesta, en tokens. - + Prompt Batch Size Tamaño del lote de indicaciones - + The batch size used for prompt processing. El tamaño del lote utilizado para el procesamiento de indicaciones. - + Amount of prompt tokens to process at once. NOTE: Higher values can speed up reading prompts but will use more RAM. Cantidad de tokens de prompt a procesar de una vez. NOTA: Valores más altos pueden acelerar la lectura de prompts, pero usarán más RAM. - + Repeat Penalty Penalización por repetición - + Repetition penalty factor. Set to 1 to disable. Factor de penalización por repetición. Establecer a 1 para desactivar. 
- + Repeat Penalty Tokens Tokens de penalización por repetición - + Number of previous tokens used for penalty. Número de tokens anteriores utilizados para la penalización. - + GPU Layers Capas de GPU - + Number of model layers to load into VRAM. Número de capas del modelo a cargar en la VRAM. - + Maximum combined prompt/response tokens before information is lost. Using more context than the model was trained on will yield poor results. NOTE: Does not take effect until you reload the model. @@ -1849,7 +1860,7 @@ Usar más contexto del que el modelo fue entrenado producirá resultados deficie NOTA: No surtirá efecto hasta que recargue el modelo. - + How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model. Lower values increase CPU load and RAM usage, and make inference slower. NOTE: Does not take effect until you reload the model. @@ -1861,217 +1872,217 @@ NOTA: No surte efecto hasta que recargue el modelo. ModelsView - + No Models Installed No hay modelos instalados - + Install a model to get started using GPT4All Instala un modelo para empezar a usar GPT4All - - + + + Add Model + Agregar modelo - + Shows the add model view Muestra la vista de agregar modelo - + Installed Models Modelos instalados - + Locally installed chat models Modelos de chat instalados localmente - + Model file Archivo del modelo - + Model file to be downloaded Archivo del modelo a descargar - + Description Descripción - + File description Descripción del archivo - + Cancel Cancelar - + Resume Reanudar - + Stop/restart/start the download Detener/reiniciar/iniciar la descarga - + Remove Eliminar - + Remove model from filesystem Eliminar modelo del sistema de archivos - - + + Install Instalar - + Install online model Instalar modelo en línea - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">Error</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. 
Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">ADVERTENCIA: No recomendado para su hardware. El modelo requiere más memoria (%1 GB) de la que su sistema tiene disponible (%2).</strong></font> - + %1 GB %1 GB - + ? ? - + Describes an error that occurred when downloading Describe un error que ocurrió durante la descarga - + Error for incompatible hardware Error por hardware incompatible - + Download progressBar Barra de progreso de descarga - + Shows the progress made in the download Muestra el progreso realizado en la descarga - + Download speed Velocidad de descarga - + Download speed in bytes/kilobytes/megabytes per second Velocidad de descarga en bytes/kilobytes/megabytes por segundo - + Calculating... Calculando... - - - - + + + + Whether the file hash is being calculated Si se está calculando el hash del archivo - + Busy indicator Indicador de ocupado - + Displayed when the file hash is being calculated Se muestra cuando se está calculando el hash del archivo - + enter $API_KEY ingrese $API_KEY - + File size Tamaño del archivo - + RAM required RAM requerida - + Parameters Parámetros - + Quant Cuantificación - + Type Tipo - + ERROR: $API_KEY is empty. ERROR: $API_KEY está vacía. - + ERROR: $BASE_URL is empty. ERROR: $BASE_URL está vacía. - + enter $BASE_URL ingrese $BASE_URL - + ERROR: $MODEL_NAME is empty. ERROR: $MODEL_NAME está vacío. - + enter $MODEL_NAME ingrese $MODEL_NAME @@ -2079,12 +2090,12 @@ NOTA: No surte efecto hasta que recargue el modelo. MyFancyLink - + Fancy link Enlace elegante - + A stylized link Un enlace estilizado @@ -2092,7 +2103,7 @@ NOTA: No surte efecto hasta que recargue el modelo. MySettingsStack - + Please choose a directory Por favor, elija un directorio @@ -2100,12 +2111,12 @@ NOTA: No surte efecto hasta que recargue el modelo. 
MySettingsTab - + Restore Defaults Restaurar valores predeterminados - + Restores settings dialog to a default state Restaura el diálogo de configuración a su estado predeterminado @@ -2113,7 +2124,7 @@ NOTA: No surte efecto hasta que recargue el modelo. NetworkDialog - + Contribute data to the GPT4All Opensource Datalake. Contribuir datos al Datalake de código abierto de GPT4All. @@ -2131,7 +2142,7 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O NOTA: Al activar esta función, enviará sus datos al Datalake de código abierto de GPT4All. No debe esperar privacidad en el chat cuando esta función esté habilitada. Sin embargo, puede esperar una atribución opcional si lo desea. Sus datos de chat estarán disponibles abiertamente para que cualquiera los descargue y serán utilizados por Nomic AI para mejorar futuros modelos de GPT4All. Nomic AI conservará toda la información de atribución adjunta a sus datos y se le acreditará como contribuyente en cualquier lanzamiento de modelo GPT4All que utilice sus datos. - + By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake. @@ -2144,47 +2155,47 @@ Cuando un modelo GPT4All te responda y hayas aceptado participar, tu conversaci NOTA: Al activar esta función, estarás enviando tus datos al Datalake de Código Abierto de GPT4All. No debes esperar privacidad en el chat cuando esta función esté habilitada. Sin embargo, puedes esperar una atribución opcional si lo deseas. 
Tus datos de chat estarán disponibles abiertamente para que cualquiera los descargue y serán utilizados por Nomic AI para mejorar futuros modelos de GPT4All. Nomic AI conservará toda la información de atribución adjunta a tus datos y se te acreditará como contribuyente en cualquier lanzamiento de modelo GPT4All que utilice tus datos. - + Terms for opt-in Términos para optar por participar - + Describes what will happen when you opt-in Describe lo que sucederá cuando opte por participar - + Please provide a name for attribution (optional) Por favor, proporcione un nombre para la atribución (opcional) - + Attribution (optional) Atribución (opcional) - + Provide attribution Proporcionar atribución - + Enable Habilitar - + Enable opt-in Habilitar participación - + Cancel Cancelar - + Cancel opt-in Cancelar participación @@ -2192,17 +2203,17 @@ NOTA: Al activar esta función, estarás enviando tus datos al Datalake de Códi NewVersionDialog - + New version is available Nueva versión disponible - + Update Actualizar - + Update to new version Actualizar a nueva versión @@ -2210,17 +2221,17 @@ NOTA: Al activar esta función, estarás enviando tus datos al Datalake de Códi PopupDialog - + Reveals a shortlived help balloon Muestra un globo de ayuda de corta duración - + Busy indicator Indicador de ocupado - + Displayed when the popup is showing busy Se muestra cuando la ventana emergente está ocupada @@ -2235,28 +2246,28 @@ NOTA: Al activar esta función, estarás enviando tus datos al Datalake de Códi SettingsView - - + + Settings Configuración - + Contains various application settings Contiene varias configuraciones de la aplicación - + Application Aplicación - + Model Modelo - + LocalDocs DocumentosLocales @@ -2264,17 +2275,17 @@ NOTA: Al activar esta función, estarás enviando tus datos al Datalake de Códi StartupDialog - + Welcome! ¡Bienvenido! 
- + Release notes Notas de la versión - + Release notes for this version Notas de la versión para esta versión @@ -2300,76 +2311,76 @@ NOTA: Al activar esta función, estarás enviando tus datos al Datalake de Códi - + Terms for opt-in Términos para aceptar - + Describes what will happen when you opt-in Describe lo que sucederá cuando acepte - - + + Opt-in for anonymous usage statistics Aceptar estadísticas de uso anónimas - - + + Yes - + Allow opt-in for anonymous usage statistics Permitir aceptación de estadísticas de uso anónimas - - + + No No - + Opt-out for anonymous usage statistics Rechazar estadísticas de uso anónimas - + Allow opt-out for anonymous usage statistics Permitir rechazo de estadísticas de uso anónimas - - + + Opt-in for network Aceptar para la red - + Allow opt-in for network Permitir aceptación para la red - + Allow opt-in anonymous sharing of chats to the GPT4All Datalake Permitir compartir anónimamente los chats con el Datalake de GPT4All - + Opt-out for network Rechazar para la red - + Allow opt-out anonymous sharing of chats to the GPT4All Datalake Permitir rechazar el compartir anónimo de chats con el Datalake de GPT4All - + ### Release notes %1### Contributors %2 @@ -2379,7 +2390,7 @@ NOTA: Al activar esta función, estarás enviando tus datos al Datalake de Códi - + ### Opt-ins for anonymous usage analytics and datalake By enabling these features, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. @@ -2419,23 +2430,23 @@ lanzamiento de modelo GPT4All que utilice sus datos. actual. ¿Desea continuar? - + <b>Warning:</b> changing the model will erase the current conversation. Do you wish to continue? <b>Advertencia:</b> cambiar el modelo borrará la conversación actual. ¿Deseas continuar? 
- + Continue Continuar - + Continue with model loading Continuar con la carga del modelo - - + + Cancel Cancelar @@ -2443,33 +2454,33 @@ lanzamiento de modelo GPT4All que utilice sus datos. ThumbsDownDialog - + Please edit the text below to provide a better response. (optional) Por favor, edite el texto a continuación para proporcionar una mejor respuesta. (opcional) - + Please provide a better response... Por favor, proporcione una mejor respuesta... - + Submit Enviar - + Submits the user's response Envía la respuesta del usuario - + Cancel Cancelar - + Closes the response dialog Cierra el diálogo de respuesta @@ -2477,126 +2488,126 @@ lanzamiento de modelo GPT4All que utilice sus datos. main - + GPT4All v%1 GPT4All v%1 - + <h3>Encountered an error starting up:</h3><br><i>"Incompatible hardware detected."</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.<br><br>See here for more information: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> <h3>Se encontró un error al iniciar:</h3><br><i>"Se detectó hardware incompatible."</i><br><br>Desafortunadamente, tu CPU no cumple con los requisitos mínimos para ejecutar este programa. En particular, no soporta instrucciones AVX, las cuales este programa requiere para ejecutar con éxito un modelo de lenguaje grande moderno. 
La única solución en este momento es actualizar tu hardware a una CPU más moderna.<br><br>Consulta aquí para más información: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> - + <h3>Encountered an error starting up:</h3><br><i>"Inability to access settings file."</i><br><br>Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help. <h3>Se encontró un error al iniciar:</h3><br><i>"No se puede acceder al archivo de configuración."</i><br><br>Desafortunadamente, algo está impidiendo que el programa acceda al archivo de configuración. Esto podría ser causado por permisos incorrectos en el directorio de configuración local de la aplicación donde se encuentra el archivo de configuración. Visita nuestro <a href="https://discord.gg/4M2QFmTt2k">canal de Discord</a> para obtener ayuda. - + Connection to datalake failed. La conexión al datalake falló. - + Saving chats. Guardando chats. - + Network dialog Diálogo de red - + opt-in to share feedback/conversations optar por compartir comentarios/conversaciones - + Home view Vista de inicio - + Home view of application Vista de inicio de la aplicación - + Home Inicio - + Chat view Vista de chat - + Chat view to interact with models Vista de chat para interactuar con modelos - + Chats Chats - - + + Models Modelos - + Models view for installed models Vista de modelos para modelos instalados - - + + LocalDocs Docs Locales - + LocalDocs view to configure and use local docs Vista de DocumentosLocales para configurar y usar documentos locales - - + + Settings Config. 
- + Settings view for application configuration Vista de configuración para la configuración de la aplicación - + The datalake is enabled El datalake está habilitado - + Using a network model Usando un modelo de red - + Server mode is enabled El modo servidor está habilitado - + Installed models Modelos instalados - + View of installed models Vista de modelos instalados diff --git a/gpt4all-chat/translations/gpt4all_it_IT.ts b/gpt4all-chat/translations/gpt4all_it_IT.ts index 85360148c38d..7a6ec7e14ce2 100644 --- a/gpt4all-chat/translations/gpt4all_it_IT.ts +++ b/gpt4all-chat/translations/gpt4all_it_IT.ts @@ -4,62 +4,62 @@ AddCollectionView - + ← Existing Collections ← Raccolte esistenti - + Add Document Collection Aggiungi raccolta documenti - + Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings. Aggiungi una cartella contenente file di testo semplice, PDF o Markdown. Configura estensioni aggiuntive in Settaggi. - + Please choose a directory Scegli una cartella - + Name Nome - + Collection name... Nome della raccolta... - + Name of the collection to add (Required) Nome della raccolta da aggiungere (Obbligatorio) - + Folder Cartella - + Folder path... Percorso cartella... - + Folder path to documents (Required) Percorso della cartella dei documenti (richiesto) - + Browse Esplora - + Create Collection Crea raccolta @@ -67,288 +67,288 @@ AddModelView - + ← Existing Models ← Modelli esistenti - + Explore Models Esplora modelli - + Discover and download models by keyword search... Scopri e scarica i modelli tramite ricerca per parole chiave... 
- + Text field for discovering and filtering downloadable models Campo di testo per scoprire e filtrare i modelli scaricabili - + Initiate model discovery and filtering Avvia rilevamento e filtraggio dei modelli - + Triggers discovery and filtering of models Attiva la scoperta e il filtraggio dei modelli - + Default Predefinito - + Likes Mi piace - + Downloads Scaricamenti - + Recent Recenti - + Asc Asc - + Desc Disc - + None Niente - + Searching · %1 Ricerca · %1 - + Sort by: %1 Ordina per: %1 - + Sort dir: %1 Direzione ordinamento: %1 - + Limit: %1 Limite: %1 - + Network error: could not retrieve %1 Errore di rete: impossibile recuperare %1 - - + + Busy indicator Indicatore di occupato - + Displayed when the models request is ongoing Visualizzato quando la richiesta dei modelli è in corso - + Model file File del modello - + Model file to be downloaded File del modello da scaricare - + Description Descrizione - + File description Descrizione del file - + Cancel Annulla - + Resume Riprendi - + Download Scarica - + Stop/restart/start the download Arresta/riavvia/avvia il download - + Remove Rimuovi - + Remove model from filesystem Rimuovi il modello dal sistema dei file - - + + Install Installa - + Install online model Installa il modello online - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">AVVERTENZA: non consigliato per il tuo hardware. Il modello richiede più memoria (%1 GB) di quella disponibile nel sistema (%2).</strong></font> - + ERROR: $API_KEY is empty. ERRORE: $API_KEY è vuoto. - + ERROR: $BASE_URL is empty. ERRORE: $BASE_URL non è valido. - + enter $BASE_URL inserisci $BASE_URL - + ERROR: $MODEL_NAME is empty. ERRORE: $MODEL_NAME è vuoto. - + enter $MODEL_NAME inserisci $MODEL_NAME - + %1 GB - - + + ? 
- + Describes an error that occurred when downloading Descrive un errore che si è verificato durante lo scaricamento - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">Errore</a></strong></font> - + Error for incompatible hardware Errore per hardware incompatibile - + Download progressBar Barra di avanzamento dello scaricamento - + Shows the progress made in the download Mostra lo stato di avanzamento dello scaricamento - + Download speed Velocità di scaricamento - + Download speed in bytes/kilobytes/megabytes per second Velocità di scaricamento in byte/kilobyte/megabyte al secondo - + Calculating... Calcolo in corso... - - - - + + + + Whether the file hash is being calculated Se viene calcolato l'hash del file - + Displayed when the file hash is being calculated Visualizzato durante il calcolo dell'hash del file - + enter $API_KEY Inserire $API_KEY - + File size Dimensione del file - + RAM required RAM richiesta - + Parameters Parametri - + Quant Quant - + Type Tipo @@ -356,22 +356,22 @@ ApplicationSettings - + Application Applicazione - + Network dialog Dialogo di rete - + opt-in to share feedback/conversations aderisci per condividere feedback/conversazioni - + ERROR: Update system could not find the MaintenanceTool used<br> to check for updates!<br><br> Did you install this application using the online installer? If so,<br> @@ -382,87 +382,87 @@ - + Error dialog Dialogo d'errore - + Application Settings Settaggi applicazione - + General Generale - + Theme Tema - + The application color scheme. La combinazione di colori dell'applicazione. - + Dark Scuro - + Light Chiaro - + LegacyDark Scuro Legacy - + Font Size Dimensioni del Font - + The size of text in the application. La dimensione del testo nell'applicazione. - + Small Piccolo - + Medium Medio - + Large Grande - + Language and Locale Lingua e settaggi locali - + The language and locale you wish to use. 
La lingua e i settaggi locali che vuoi utilizzare. - + System Locale Settaggi locali del sistema - + Device Dispositivo @@ -471,139 +471,139 @@ Il dispositivo di calcolo utilizzato per la generazione del testo. "Auto" utilizza Vulkan o Metal. - + The compute device used for text generation. Il dispositivo di calcolo utilizzato per la generazione del testo. - - + + Application default Applicazione predefinita - + Default Model Modello predefinito - + The preferred model for new chats. Also used as the local server fallback. Il modello preferito per le nuove chat. Utilizzato anche come ripiego del server locale. - + Suggestion Mode Modalità suggerimento - + Generate suggested follow-up questions at the end of responses. Genera le domande di approfondimento suggerite alla fine delle risposte. - + When chatting with LocalDocs Quando chatti con LocalDocs - + Whenever possible Quando possibile - + Never Mai - + Download Path Percorso di scarico - + Where to store local models and the LocalDocs database. Dove archiviare i modelli locali e il database LocalDocs. - + Browse Esplora - + Choose where to save model files Scegli dove salvare i file del modello - + Enable Datalake Abilita Datalake - + Send chats and feedback to the GPT4All Open-Source Datalake. Invia chat e commenti al Datalake Open Source GPT4All. - + Advanced Avanzate - + CPU Threads Thread della CPU Tread CPU - + The number of CPU threads used for inference and embedding. Il numero di thread della CPU utilizzati per l'inferenza e l'incorporamento. - + Save Chat Context Salva il contesto della chat - + Save the chat model's state to disk for faster loading. WARNING: Uses ~2GB per chat. Salva lo stato del modello di chat su disco per un caricamento più rapido. ATTENZIONE: utilizza circa 2 GB per chat. - + Enable Local Server Abilita server locale - + Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage. Esporre un server compatibile con OpenAI a localhost. 
ATTENZIONE: comporta un maggiore utilizzo delle risorse. - + API Server Port Porta del server API - + The port to use for the local server. Requires restart. La porta da utilizzare per il server locale. Richiede il riavvio. - + Check For Updates Controlla gli aggiornamenti - + Manually check for an update to GPT4All. Verifica manualmente l'aggiornamento di GPT4All. - + Updates Aggiornamenti @@ -611,13 +611,13 @@ Chat - - + + New Chat Nuova Chat - + Server Chat Chat del server @@ -625,12 +625,12 @@ ChatAPIWorker - + ERROR: Network error occurred while connecting to the API server ERRORE: si è verificato un errore di rete durante la connessione al server API - + ChatAPIWorker::handleFinished got HTTP Error %1 %2 ChatAPIWorker::handleFinished ha ricevuto l'errore HTTP %1 %2 @@ -638,62 +638,62 @@ ChatDrawer - + Drawer Cassetto - + Main navigation drawer Cassetto di navigazione principale - + + New Chat + Nuova Chat - + Create a new chat Crea una nuova chat - + Select the current chat or edit the chat when in edit mode Seleziona la chat corrente o modifica la chat in modalità modifica - + Edit chat name Modifica il nome della chat - + Save chat name Salva il nome della chat - + Delete chat Elimina chat - + Confirm chat deletion Conferma l'eliminazione della chat - + Cancel chat deletion Annulla l'eliminazione della chat - + List of chats Elenco delle chat - + List of chats in the drawer dialog Elenco delle chat nella finestra di dialogo del cassetto @@ -701,32 +701,32 @@ ChatListModel - + TODAY OGGI - + THIS WEEK QUESTA SETTIMANA - + THIS MONTH QUESTO MESE - + LAST SIX MONTHS ULTIMI SEI MESI - + THIS YEAR QUEST'ANNO - + LAST YEAR L'ANNO SCORSO @@ -734,150 +734,150 @@ ChatView - + <h3>Warning</h3><p>%1</p> <h3>Avviso</h3><p>%1</p> - + Switch model dialog Finestra di dialogo Cambia modello - + Warn the user if they switch models, then context will be erased Avvisa l'utente che se cambia modello, il contesto verrà cancellato - + Conversation copied to clipboard. 
Conversazione copiata negli appunti. - + Code copied to clipboard. Codice copiato negli appunti. - + Chat panel Pannello chat - + Chat panel with options Pannello chat con opzioni - + Reload the currently loaded model Ricarica il modello attualmente caricato - + Eject the currently loaded model Espelli il modello attualmente caricato - + No model installed. Nessun modello installato. - + Model loading error. Errore di caricamento del modello. - + Waiting for model... In attesa del modello... - + Switching context... Cambio contesto... - + Choose a model... Scegli un modello... - + Not found: %1 Non trovato: %1 - + The top item is the current model L'elemento in alto è il modello attuale - - + + LocalDocs - + Add documents Aggiungi documenti - + add collections of documents to the chat aggiungi raccolte di documenti alla chat - + Load the default model Carica il modello predefinito - + Loads the default model which can be changed in settings Carica il modello predefinito che può essere modificato nei settaggi - + No Model Installed Nessun modello installato - + GPT4All requires that you install at least one model to get started GPT4All richiede l'installazione di almeno un modello per iniziare - + Install a Model Installa un modello - + Shows the add model view Mostra la vista aggiungi modello - + Conversation with the model Conversazione con il modello - + prompt / response pairs from the conversation coppie prompt/risposta dalla conversazione - + GPT4All - + You Tu @@ -886,139 +886,139 @@ modello per iniziare ricalcolo contesto ... - + response stopped ... risposta interrotta ... - + processing ... elaborazione ... - + generating response ... generazione risposta ... - + generating questions ... generarzione domande ... 
- - + + Copy Copia - + Copy Message Copia messaggio - + Disable markdown Disabilita Markdown - + Enable markdown Abilita Markdown - + Thumbs up Mi piace - + Gives a thumbs up to the response Dà un mi piace alla risposta - + Thumbs down Non mi piace - + Opens thumbs down dialog Apre la finestra di dialogo "Non mi piace" - + Suggested follow-ups Approfondimenti suggeriti - + Erase and reset chat session Cancella e ripristina la sessione di chat - + Copy chat session to clipboard Copia la sessione di chat negli appunti - + Redo last chat response Riesegui l'ultima risposta della chat - + Stop generating Interrompi la generazione - + Stop the current response generation Arresta la generazione della risposta corrente - + Reloads the model Ricarica il modello - + <h3>Encountered an error loading model:</h3><br><i>"%1"</i><br><br>Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:<br><ul><li>Ensure the model file has a compatible format and type<li>Check the model file is complete in the download folder<li>You can find the download folder in the settings dialog<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum<li>Read more about what models are supported in our <a href="https://docs.gpt4all.io/">documentation</a> for the gui<li>Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help <h3>Si è verificato un errore durante il caricamento del modello:</h3><br><i>"%1"</i><br><br>Gli errori di caricamento del modello possono verificarsi per diversi motivi, ma le cause più comuni includono un formato di file non valido, un download incompleto o danneggiato, il tipo di file sbagliato, RAM di sistema insufficiente o un tipo di modello incompatibile. 
Ecco alcuni suggerimenti per risolvere il problema:<br><ul><li>Assicurati che il file del modello abbia un formato e un tipo compatibili<li>Verifica che il file del modello sia completo nella cartella di download<li>Puoi trovare la cartella di download nella finestra di dialogo dei settaggi<li>Se hai scaricato manualmente il modello, assicurati che il file non sia danneggiato controllando md5sum<li>Leggi ulteriori informazioni su quali modelli sono supportati nella nostra <a href="https://docs.gpt4all.io/ ">documentazione</a> per la GUI<li>Consulta il nostro <a href="https://discord.gg/4M2QFmTt2k">canale Discord</a> per assistenza - - + + Reload · %1 Ricarica · %1 - + Loading · %1 Caricamento · %1 - + Load · %1 (default) → Carica · %1 (predefinito) → - + restoring from text ... ripristino dal testo ... - + retrieving localdocs: %1 ... recupero documenti locali: %1 ... - + searching localdocs: %1 ... ricerca in documenti locali: %1 ... - + %n Source(s) %n Fonte @@ -1026,42 +1026,42 @@ modello per iniziare - + Send a message... Manda un messaggio... - + Load a model to continue... Carica un modello per continuare... - + Send messages/prompts to the model Invia messaggi/prompt al modello - + Cut Taglia - + Paste Incolla - + Select All Seleziona tutto - + Send message Invia messaggio - + Sends the message/prompt contained in textfield to the model Invia il messaggio/prompt contenuto nel campo di testo al modello @@ -1069,12 +1069,12 @@ modello per iniziare CollectionsDrawer - + Warning: searching collections while indexing can return incomplete results Avviso: la ricerca nelle raccolte durante l'indicizzazione può restituire risultati incompleti - + %n file(s) %n file @@ -1082,7 +1082,7 @@ modello per iniziare - + %n word(s) %n parola @@ -1090,17 +1090,17 @@ modello per iniziare - + Updating In aggiornamento - + + Add Docs + Aggiungi documenti - + Select a collection to make it available to the chat model. 
Seleziona una raccolta per renderla disponibile al modello in chat. @@ -1108,37 +1108,37 @@ modello per iniziare Download - + Model "%1" is installed successfully. Il modello "%1" è stato installato correttamente. - + ERROR: $MODEL_NAME is empty. ERRORE: $MODEL_NAME è vuoto. - + ERROR: $API_KEY is empty. ERRORE: $API_KEY è vuoto. - + ERROR: $BASE_URL is invalid. ERRORE: $BASE_URL non è valido. - + ERROR: Model "%1 (%2)" is conflict. ERRORE: il modello "%1 (%2)" è in conflitto. - + Model "%1 (%2)" is installed successfully. Il modello "%1 (%2)" è stato installato correttamente. - + Model "%1" is removed. Il modello "%1" è stato rimosso. @@ -1146,92 +1146,92 @@ modello per iniziare HomeView - + Welcome to GPT4All Benvenuto in GPT4All - + The privacy-first LLM chat application L'applicazione di chat LLM che mette al primo posto la privacy - + Start chatting Inizia a chattare - + Start Chatting Inizia a Chattare - + Chat with any LLM Chatta con qualsiasi LLM - + LocalDocs - + Chat with your local files Chatta con i tuoi file locali - + Find Models Trova modelli - + Explore and download models Esplora e scarica i modelli - + Latest news Ultime notizie - + Latest news from GPT4All Ultime notizie da GPT4All - + Release Notes Note di rilascio - + Documentation Documentazione - + Discord - + X (Twitter) - + Github - + nomic.ai nomic.ai - + Subscribe to Newsletter Iscriviti alla Newsletter @@ -1239,118 +1239,118 @@ modello per iniziare LocalDocsSettings - + LocalDocs - + LocalDocs Settings Settaggi LocalDocs - + Indexing Indicizzazione - + Allowed File Extensions Estensioni di file consentite - + Comma-separated list. LocalDocs will only attempt to process files with these extensions. Elenco separato da virgole. LocalDocs tenterà di elaborare solo file con queste estensioni. - + Embedding Questo termine si dovrebbe tradurre come "Incorporamento". 
This term has been translated in other applications like A1111 and InvokeAI as "Incorporamento" Incorporamento - + Use Nomic Embed API Utilizza l'API di incorporamento Nomic Embed - + Embed documents using the fast Nomic API instead of a private local model. Requires restart. Incorpora documenti utilizzando la veloce API di Nomic invece di un modello locale privato. Richiede il riavvio. - + Nomic API Key Chiave API di Nomic - + API key to use for Nomic Embed. Get one from the Atlas <a href="https://atlas.nomic.ai/cli-login">API keys page</a>. Requires restart. Chiave API da utilizzare per Nomic Embed. Ottienine una dalla <a href="https://atlas.nomic.ai/cli-login">pagina delle chiavi API</a> di Atlas. Richiede il riavvio. - + Embeddings Device Dispositivo per incorporamenti - + The compute device used for embeddings. Requires restart. Il dispositivo di calcolo utilizzato per gli incorporamenti. Richiede il riavvio. - + Application default Applicazione predefinita - + Display Mostra - + Show Sources Mostra le fonti - + Display the sources used for each response. Visualizza le fonti utilizzate per ciascuna risposta. - + Advanced Avanzate - + Warning: Advanced usage only. Avvertenza: solo per uso avanzato. - + Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. More info <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">here</a>. Valori troppo grandi possono causare errori di Localdocs, risposte estremamente lente o l'impossibilità di rispondere. In parole povere, {N caratteri x N frammenti} vengono aggiunti alla finestra di contesto del modello. Maggiori informazioni <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">qui</a>. - + Document snippet size (characters) Dimensioni del frammento di documento (caratteri) - + Number of characters per document snippet. 
Larger numbers increase likelihood of factual responses, but also result in slower generation. Numero di caratteri per frammento di documento. Numeri più grandi aumentano la probabilità di risposte basate sui fatti, ma comportano anche una generazione più lenta. - + Max document snippets per prompt Numero massimo di frammenti di documento per prompt - + Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation. Il numero massimo di frammenti di documento recuperati, che presentano le migliori corrispondenze, da includere nel contesto del prompt. Numeri più alti aumentano la probabilità di ricevere risposte basate sui fatti, ma comportano anche una generazione più lenta. @@ -1358,17 +1358,17 @@ modello per iniziare LocalDocsView - + LocalDocs - + Chat with your local files Chatta con i tuoi file locali - + + Add Collection + Aggiungi raccolta @@ -1377,102 +1377,102 @@ modello per iniziare ERRORE: il database di LocalDocs non è valido. - + <h3>ERROR: The LocalDocs database cannot be accessed or is not valid.</h3><br><i>Note: You will need to restart after trying any of the following suggested fixes.</i><br><ul><li>Make sure that the folder set as <b>Download Path</b> exists on the file system.</li><li>Check ownership as well as read and write permissions of the <b>Download Path</b>.</li><li>If there is a <b>localdocs_v2.db</b> file, check its ownership and read/write permissions, too.</li></ul><br>If the problem persists and there are any 'localdocs_v*.db' files present, as a last resort you can<br>try backing them up and removing them. You will have to recreate your collections, however. 
<h3>ERRORE: Impossibile accedere al database LocalDocs o non è valido.</h3><br><i>Nota: sarà necessario riavviare dopo aver provato una delle seguenti soluzioni suggerite.</i><br><ul><li>Assicurati che la cartella impostata come <b>Percorso di download</b> esista nel file system.</li><li>Controlla la proprietà e i permessi di lettura e scrittura del <b>Percorso di download</b>.</li><li>Se è presente un file <b>localdocs_v2.db</b>, controlla anche la sua proprietà e i permessi di lettura/scrittura.</li></ul><br>Se il problema persiste e sono presenti file 'localdocs_v*.db', come ultima risorsa puoi<br>provare a eseguirne il backup e a rimuoverli. Tuttavia, dovrai ricreare le tue raccolte. - + No Collections Installed Nessuna raccolta installata - + Install a collection of local documents to get started using this feature Installa una raccolta di documenti locali per iniziare a utilizzare questa funzionalità - + + Add Doc Collection + Aggiungi raccolta di documenti - + Shows the add model view Mostra la vista aggiungi modello - + Indexing progressBar Barra di avanzamento indicizzazione - + Shows the progress made in the indexing Mostra lo stato di avanzamento dell'indicizzazione - + ERROR ERRORE - + INDEXING INDICIZZAZIONE - + EMBEDDING INCORPORAMENTO - + REQUIRES UPDATE RICHIEDE AGGIORNAMENTO - + READY PRONTO - + INSTALLING INSTALLAZIONE - + Indexing in progress Indicizzazione in corso - + Embedding in progress Incorporamento in corso - + This collection requires an update after version change Questa raccolta richiede un aggiornamento dopo il cambio di versione - + Automatically reindexes upon changes to the folder Reindicizza automaticamente in caso di modifiche alla cartella - + Installation in progress Installazione in corso - + % % - + %n file(s) %n file @@ -1480,7 +1480,7 @@ modello per iniziare - + %n word(s) %n parola @@ -1488,27 +1488,27 @@ modello per iniziare - + Remove Rimuovi - + Rebuild Ricostruisci - + Reindex this folder from scratch. 
This is slow and usually not needed. Reindicizzare questa cartella da zero. Lento e di solito non necessario. - + Update Aggiorna - + Update the collection to the new version. This is a slow operation. Aggiorna la raccolta alla nuova versione. Questa è un'operazione lenta. @@ -1516,67 +1516,78 @@ modello per iniziare ModelList - + <ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with OpenAI</li><li>You can apply for an API key <a href="https://platform.openai.com/account/api-keys">here.</a></li> <ul><li>Richiede una chiave API OpenAI personale.</li><li>ATTENZIONE: invierà le tue chat a OpenAI!</li><li>La tua chiave API verrà archiviata su disco</li><li> Verrà utilizzato solo per comunicare con OpenAI</li><li>Puoi richiedere una chiave API <a href="https://platform.openai.com/account/api-keys">qui.</a> </li> - + <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1 - + <strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2 - + <strong>Mistral Tiny model</strong><br> %1 - + <strong>Mistral Small model</strong><br> %1 - + <strong>Mistral Medium model</strong><br> %1 - + <br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info. <br><br><i>* Anche se paghi OpenAI per ChatGPT-4 questo non garantisce l'accesso alla chiave API. Contatta OpenAI per maggiori informazioni. 
- + + + cannot open "%1": %2 + + + + + cannot create "%1": %2 + + + + %1 (%2) %1 (%2) - + <strong>OpenAI-Compatible API Model</strong><br><ul><li>API Key: %1</li><li>Base URL: %2</li><li>Model Name: %3</li></ul> <strong>Modello API compatibile con OpenAI</strong><br><ul><li>Chiave API: %1</li><li>URL di base: %2</li><li>Nome modello: %3</li></ul> - + <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> <ul><li>Richiede una chiave API Mistral personale.</li><li>ATTENZIONE: invierà le tue chat a Mistral!</li><li>La tua chiave API verrà archiviata su disco</li><li> Verrà utilizzato solo per comunicare con Mistral</li><li>Puoi richiedere una chiave API <a href="https://console.mistral.ai/user/api-keys">qui</a>. </li> - + <ul><li>Requires personal API key and the API base URL.</li><li>WARNING: Will send your chats to the OpenAI-compatible API Server you specified!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with the OpenAI-compatible API Server</li> <ul><li>Richiede una chiave API personale e l'URL di base dell'API.</li><li>ATTENZIONE: invierà le tue chat al server API compatibile con OpenAI che hai specificato!</li><li>La tua chiave API verrà archiviata su disco</li><li>Verrà utilizzata solo per comunicare con il server API compatibile con OpenAI</li> - + <strong>Connect to OpenAI-compatible API server</strong><br> %1 <strong>Connetti al server API compatibile con OpenAI</strong><br> %1 - + <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> <strong>Creato da %1.</strong><br><ul><li>Pubblicato il %2.<li>Questo modello ha %3 Mi piace.<li>Questo 
modello ha %4 download.<li>Altro informazioni possono essere trovate <a href="https://huggingface.co/%5">qui.</a></ul> @@ -1584,92 +1595,92 @@ modello per iniziare ModelSettings - + Model Modello - + Model Settings Settaggi modello - + Clone Clona - + Remove Rimuovi - + Name Nome - + Model File File del modello - + System Prompt Prompt di sistema - + Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens. Prefisso all'inizio di ogni conversazione. Deve contenere i token di inquadramento appropriati. - + Prompt Template Schema del prompt - + The template that wraps every prompt. Lo schema che incorpora ogni prompt. - + Must contain the string "%1" to be replaced with the user's input. Deve contenere la stringa "%1" da sostituire con l'input dell'utente. - + Chat Name Prompt Prompt del nome della chat - + Prompt used to automatically generate chat names. Prompt utilizzato per generare automaticamente nomi di chat. - + Suggested FollowUp Prompt Prompt di approfondimento suggerito - + Prompt used to generate suggested follow-up questions. Prompt utilizzato per generare le domande di approfondimento suggerite. - + Context Length Lunghezza del contesto - + Number of input and output tokens the model sees. Numero di token di input e output visualizzati dal modello. - + Maximum combined prompt/response tokens before information is lost. Using more context than the model was trained on will yield poor results. NOTE: Does not take effect until you reload the model. @@ -1678,128 +1689,128 @@ L'utilizzo di un contesto maggiore rispetto a quello su cui è stato addest NOTA: non ha effetto finché non si ricarica il modello. - + Temperature Temperatura - + Randomness of model output. Higher -> more variation. Casualità dell'uscita del modello. Più alto -> più variazione. - + Temperature increases the chances of choosing less likely tokens. NOTE: Higher temperature gives more creative but less predictable outputs. 
La temperatura aumenta le possibilità di scegliere token meno probabili. NOTA: una temperatura più elevata offre risultati più creativi ma meno prevedibili. - + Top-P - + Nucleus Sampling factor. Lower -> more predictable. Fattore di campionamento del nucleo. Inferiore -> più prevedibile. - + Only the most likely tokens up to a total probability of top_p can be chosen. NOTE: Prevents choosing highly unlikely tokens. Solo i token più probabili, fino a un totale di probabilità di top_p, possono essere scelti. NOTA: impedisce la scelta di token altamente improbabili. - + Min-P - + Minimum token probability. Higher -> more predictable. Probabilità minima del token. Più alto -> più prevedibile. - + Sets the minimum relative probability for a token to be considered. Imposta la probabilità relativa minima affinché un token venga considerato. - + Top-K - + Size of selection pool for tokens. Dimensione del lotto di selezione per i token. - + Only the top K most likely tokens will be chosen from. Saranno scelti solo i primi K token più probabili. - + Max Length Lunghezza massima - + Maximum response length, in tokens. Lunghezza massima della risposta, in token. - + Prompt Batch Size Dimensioni del lotto di prompt - + The batch size used for prompt processing. La dimensione del lotto usata per l'elaborazione dei prompt. - + Amount of prompt tokens to process at once. NOTE: Higher values can speed up reading prompts but will use more RAM. Numero di token del prompt da elaborare contemporaneamente. NOTA: valori più alti possono velocizzare la lettura dei prompt ma utilizzeranno più RAM. - + Repeat Penalty Penalità di ripetizione - + Repetition penalty factor. Set to 1 to disable. Fattore di penalità di ripetizione. Impostare su 1 per disabilitare. - + Repeat Penalty Tokens Token di penalità ripetizione - + Number of previous tokens used for penalty. Numero di token precedenti utilizzati per la penalità. - + GPU Layers Livelli GPU - + Number of model layers to load into VRAM. 
Numero di livelli del modello da caricare nella VRAM. - + How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model. Lower values increase CPU load and RAM usage, and make inference slower. NOTE: Does not take effect until you reload the model. @@ -1811,217 +1822,217 @@ NOTA: non ha effetto finché non si ricarica il modello. ModelsView - + No Models Installed Nessun modello installato - + Install a model to get started using GPT4All Installa un modello per iniziare a utilizzare GPT4All - - + + + Add Model + Aggiungi Modello - + Shows the add model view Mostra la vista aggiungi modello - + Installed Models Modelli installati - + Locally installed chat models Modelli per chat installati localmente - + Model file File del modello - + Model file to be downloaded File del modello da scaricare - + Description Descrizione - + File description Descrizione del file - + Cancel Annulla - + Resume Riprendi - + Stop/restart/start the download Arresta/riavvia/avvia il download - + Remove Rimuovi - + Remove model from filesystem Rimuovi il modello dal sistema dei file - - + + Install Installa - + Install online model Installa il modello online - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">Errore</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">AVVISO: non consigliato per il tuo hardware. Il modello richiede più memoria (%1 GB) di quella disponibile nel sistema (%2).</strong></font> - + ERROR: $API_KEY is empty. ERRORE: $API_KEY è vuoto. - + ERROR: $BASE_URL is empty. ERRORE: $BASE_URL non è valido. - + enter $BASE_URL inserisci $BASE_URL - + ERROR: $MODEL_NAME is empty. ERRORE: $MODEL_NAME è vuoto. - + enter $MODEL_NAME inserisci $MODEL_NAME - + %1 GB - + ? 
- + Describes an error that occurred when downloading Descrive un errore che si è verificato durante lo scaricamento - + Error for incompatible hardware Errore per hardware incompatibile - + Download progressBar Barra di avanzamento dello scaricamento - + Shows the progress made in the download Mostra lo stato di avanzamento dello scaricamento - + Download speed Velocità di scaricamento - + Download speed in bytes/kilobytes/megabytes per second Velocità di scaricamento in byte/kilobyte/megabyte al secondo - + Calculating... Calcolo in corso... - - - - + + + + Whether the file hash is being calculated Se viene calcolato l'hash del file - + Busy indicator Indicatore di occupato - + Displayed when the file hash is being calculated Visualizzato durante il calcolo dell'hash del file - + enter $API_KEY Inserire $API_KEY - + File size Dimensione del file - + RAM required RAM richiesta - + Parameters Parametri - + Quant Quant - + Type Tipo @@ -2029,12 +2040,12 @@ NOTA: non ha effetto finché non si ricarica il modello. MyFancyLink - + Fancy link Mio link - + A stylized link Un link d'esempio @@ -2042,7 +2053,7 @@ NOTA: non ha effetto finché non si ricarica il modello. MySettingsStack - + Please choose a directory Scegli una cartella @@ -2050,12 +2061,12 @@ NOTA: non ha effetto finché non si ricarica il modello. MySettingsTab - + Restore Defaults Riprista i valori predefiniti - + Restores settings dialog to a default state Ripristina la finestra di dialogo dei settaggi a uno stato predefinito @@ -2063,12 +2074,12 @@ NOTA: non ha effetto finché non si ricarica il modello. NetworkDialog - + Contribute data to the GPT4All Opensource Datalake. Contribuisci con i tuoi dati al Datalake Open Source di GPT4All. - + By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. 
When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake. @@ -2081,47 +2092,47 @@ Quando un modello di GPT4All ti risponde e tu hai aderito, la tua conversazione NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di GPT4All. Non dovresti avere aspettative sulla privacy della chat quando questa funzione è abilitata. Dovresti, tuttavia, aspettarti un'attribuzione facoltativa, se lo desideri. I tuoi dati di chat saranno liberamente disponibili per essere scaricati da chiunque e verranno utilizzati da Nomic AI per migliorare i futuri modelli GPT4All. Nomic AI conserverà tutte le informazioni di attribuzione allegate ai tuoi dati e verrai accreditato come collaboratore a qualsiasi versione del modello GPT4All che utilizza i tuoi dati! 
- + Terms for opt-in Termini per l'adesione - + Describes what will happen when you opt-in Descrive cosa accadrà quando effettuerai l'adesione - + Please provide a name for attribution (optional) Fornisci un nome per l'attribuzione (facoltativo) - + Attribution (optional) Attribuzione (facoltativo) - + Provide attribution Fornire attribuzione - + Enable Abilita - + Enable opt-in Abilita l'adesione - + Cancel Annulla - + Cancel opt-in Annulla l'adesione @@ -2129,17 +2140,17 @@ NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di NewVersionDialog - + New version is available Nuova versione disponibile - + Update Aggiorna - + Update to new version Aggiorna alla nuova versione @@ -2147,17 +2158,17 @@ NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di PopupDialog - + Reveals a shortlived help balloon Rivela un messaggio di aiuto di breve durata - + Busy indicator Indicatore di occupato - + Displayed when the popup is showing busy Visualizzato quando la finestra a comparsa risulta occupata @@ -2165,28 +2176,28 @@ NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di SettingsView - - + + Settings Settaggi - + Contains various application settings Contiene vari settaggi dell'applicazione - + Application Applicazione - + Model Modello - + LocalDocs @@ -2194,12 +2205,12 @@ NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di StartupDialog - + Welcome! Benvenuto! - + ### Release notes %1### Contributors %2 @@ -2208,17 +2219,17 @@ NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di %2 - + Release notes Note di rilascio - + Release notes for this version Note di rilascio per questa versione - + ### Opt-ins for anonymous usage analytics and datalake By enabling these features, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. 
@@ -2241,71 +2252,71 @@ Quando un modello di GPT4All ti risponde e tu hai aderito, la tua conversazione NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di GPT4All. Non dovresti avere aspettative sulla privacy della chat quando questa funzione è abilitata. Dovresti, tuttavia, aspettarti un'attribuzione facoltativa, se lo desideri, . I tuoi dati di chat saranno liberamente disponibili per essere scaricati da chiunque e verranno utilizzati da Nomic AI per migliorare i futuri modelli GPT4All. Nomic AI conserverà tutte le informazioni di attribuzione allegate ai tuoi dati e verrai accreditato come collaboratore a qualsiasi versione del modello GPT4All che utilizza i tuoi dati! - + Terms for opt-in Termini per l'adesione - + Describes what will happen when you opt-in Descrive cosa accadrà quando effettuerai l'adesione - - + + Opt-in for anonymous usage statistics Attiva le statistiche di utilizzo anonime - - + + Yes Si - + Allow opt-in for anonymous usage statistics Consenti l'attivazione di statistiche di utilizzo anonime - - + + No No - + Opt-out for anonymous usage statistics Disattiva le statistiche di utilizzo anonime - + Allow opt-out for anonymous usage statistics Consenti la disattivazione per le statistiche di utilizzo anonime - - + + Opt-in for network Aderisci per la rete - + Allow opt-in for network Consenti l'adesione per la rete - + Allow opt-in anonymous sharing of chats to the GPT4All Datalake Consenti la condivisione anonima delle chat su GPT4All Datalake - + Opt-out for network Disattiva per la rete - + Allow opt-out anonymous sharing of chats to the GPT4All Datalake Consenti la non adesione alla condivisione anonima delle chat nel GPT4All Datalake @@ -2313,23 +2324,23 @@ NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di SwitchModelDialog - + <b>Warning:</b> changing the model will erase the current conversation. Do you wish to continue? 
<b>Avviso:</b> la modifica del modello cancellerà la conversazione corrente. Vuoi continuare? - + Continue Continua - + Continue with model loading Continuare con il caricamento del modello - - + + Cancel Annulla @@ -2337,32 +2348,32 @@ NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di ThumbsDownDialog - + Please edit the text below to provide a better response. (optional) Modifica il testo seguente per fornire una risposta migliore. (opzionale) - + Please provide a better response... Si prega di fornire una risposta migliore... - + Submit Invia - + Submits the user's response Invia la risposta dell'utente - + Cancel Annulla - + Closes the response dialog Chiude la finestra di dialogo della risposta @@ -2370,125 +2381,125 @@ NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di main - + <h3>Encountered an error starting up:</h3><br><i>"Incompatible hardware detected."</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.<br><br>See here for more information: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> <h3>Si è verificato un errore all'avvio:</h3><br><i>"Rilevato hardware incompatibile."</i><br><br>Sfortunatamente, la tua CPU non soddisfa i requisiti minimi per eseguire questo programma. In particolare, non supporta gli elementi intrinseci AVX richiesti da questo programma per eseguire con successo un modello linguistico moderno e di grandi dimensioni. 
L'unica soluzione in questo momento è aggiornare il tuo hardware con una CPU più moderna.<br><br>Vedi qui per ulteriori informazioni: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https ://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> - + GPT4All v%1 - + <h3>Encountered an error starting up:</h3><br><i>"Inability to access settings file."</i><br><br>Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help. <h3>Si è verificato un errore all'avvio:</h3><br><i>"Impossibile accedere al file dei settaggi."</i><br><br>Sfortunatamente, qualcosa impedisce al programma di accedere al file dei settaggi. Ciò potrebbe essere causato da autorizzazioni errate nella cartella di configurazione locale dell'app in cui si trova il file dei settaggi. Dai un'occhiata al nostro <a href="https://discord.gg/4M2QFmTt2k">canale Discord</a> per ricevere assistenza. - + Connection to datalake failed. La connessione al Datalake non è riuscita. - + Saving chats. Salvataggio delle chat. 
- + Network dialog Dialogo di rete - + opt-in to share feedback/conversations aderisci per condividere feedback/conversazioni - + Home view Vista iniziale - + Home view of application Vista iniziale dell'applicazione - + Home Inizia - + Chat view Vista chat - + Chat view to interact with models Vista chat per interagire con i modelli - + Chats Chat - - + + Models Modelli - + Models view for installed models Vista modelli per i modelli installati - - + + LocalDocs - + LocalDocs view to configure and use local docs Vista LocalDocs per configurare e utilizzare i documenti locali - - + + Settings Settaggi - + Settings view for application configuration Vista dei settaggi per la configurazione dell'applicazione - + The datalake is enabled Il Datalake è abilitato - + Using a network model Utilizzando un modello di rete - + Server mode is enabled La modalità server è abilitata - + Installed models Modelli installati - + View of installed models Vista dei modelli installati diff --git a/gpt4all-chat/translations/gpt4all_pt_BR.ts b/gpt4all-chat/translations/gpt4all_pt_BR.ts index 38a9177b79f4..76b5b5aad362 100644 --- a/gpt4all-chat/translations/gpt4all_pt_BR.ts +++ b/gpt4all-chat/translations/gpt4all_pt_BR.ts @@ -4,62 +4,62 @@ AddCollectionView - + ← Existing Collections ← Minhas coleções - + Add Document Collection Adicionar Coleção de Documentos - + Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings. Adicione uma pasta contendo arquivos de texto simples, PDFs ou Markdown. Configure extensões adicionais nas Configurações. - + Please choose a directory Escolha um diretório - + Name Nome - + Collection name... Nome da coleção... - + Name of the collection to add (Required) Nome da coleção (obrigatório) - + Folder Pasta - + Folder path... Caminho da pasta... 
- + Folder path to documents (Required) Caminho da pasta com os documentos (obrigatório) - + Browse Procurar - + Create Collection Criar Coleção @@ -67,288 +67,288 @@ AddModelView - + ← Existing Models ← Meus Modelos - + Explore Models Descobrir Modelos - + Discover and download models by keyword search... Pesquisar modelos... - + Text field for discovering and filtering downloadable models Campo de texto para descobrir e filtrar modelos para download - + Initiate model discovery and filtering Pesquisar e filtrar modelos - + Triggers discovery and filtering of models Aciona a descoberta e filtragem de modelos - + Default Padrão - + Likes Curtidas - + Downloads Downloads - + Recent Recentes - + Asc Asc - + Desc Desc - + None Nenhum - + Searching · %1 Pesquisando · %1 - + Sort by: %1 Ordenar por: %1 - + Sort dir: %1 Ordenar diretório: %1 - + Limit: %1 Limite: %1 - + Network error: could not retrieve %1 Erro de rede: não foi possível obter %1 - - + + Busy indicator Indicador de processamento - + Displayed when the models request is ongoing xibido enquanto os modelos estão sendo carregados - + Model file Arquivo do modelo - + Model file to be downloaded Arquivo do modelo a ser baixado - + Description Descrição - + File description Descrição do arquivo - + Cancel Cancelar - + Resume Retomar - + Download Baixar - + Stop/restart/start the download Parar/reiniciar/iniciar o download - + Remove Remover - + Remove model from filesystem Remover modelo do sistema - - + + Install Instalar - + Install online model Instalar modelo online - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">ATENÇÃO: Este modelo não é recomendado para seu hardware. Ele exige mais memória (%1 GB) do que seu sistema possui (%2).</strong></font> - + ERROR: $API_KEY is empty. ERRO: A $API_KEY está vazia. - + ERROR: $BASE_URL is empty. ERRO: A $BASE_URL está vazia. 
- + enter $BASE_URL inserir a $BASE_URL - + ERROR: $MODEL_NAME is empty. ERRO: O $MODEL_NAME está vazio. - + enter $MODEL_NAME inserir o $MODEL_NAME - + %1 GB %1 GB - - + + ? ? - + Describes an error that occurred when downloading Mostra informações sobre o erro no download - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">Erro</a></strong></font> - + Error for incompatible hardware Aviso: Hardware não compatível - + Download progressBar Progresso do download - + Shows the progress made in the download Mostra o progresso do download - + Download speed Velocidade de download - + Download speed in bytes/kilobytes/megabytes per second Velocidade de download em bytes/kilobytes/megabytes por segundo - + Calculating... Calculando... - - - - + + + + Whether the file hash is being calculated Quando o hash do arquivo está sendo calculado - + Displayed when the file hash is being calculated Exibido durante o cálculo do hash do arquivo - + enter $API_KEY inserir $API_KEY - + File size Tamanho do arquivo - + RAM required RAM necessária - + Parameters Parâmetros - + Quant Quant - + Type Tipo @@ -356,22 +356,22 @@ ApplicationSettings - + Application Aplicativo - + Network dialog Mensagens de rede - + opt-in to share feedback/conversations Compartilhar feedback e conversas - + ERROR: Update system could not find the MaintenanceTool used<br> to check for updates!<br><br> Did you install this application using the online installer? If so,<br> @@ -388,225 +388,225 @@ reinstalar o aplicativo. - + Error dialog Mensagens de erro - + Application Settings Configurações - + General Geral - + Theme Tema - + The application color scheme. Esquema de cores. - + Dark Modo Escuro - + Light Modo Claro - + LegacyDark Modo escuro (legado) - + Font Size Tamanho da Fonte - + The size of text in the application. Tamanho do texto. 
- + Small Pequeno - + Medium Médio - + Large Grande - + Language and Locale Idioma e Região - + The language and locale you wish to use. Selecione seu idioma e região. - + System Locale Local do Sistema - + Device Processador - + The compute device used for text generation. I chose to use "Processador" instead of "Dispositivo" (Device) or "Dispositivo de Computação" (Compute Device) to simplify the terminology and make it more straightforward and understandable. "Dispositivo" can be vague and could refer to various types of hardware, whereas "Processador" clearly and specifically indicates the component responsible for processing tasks. This improves usability by avoiding the ambiguity that might arise from using more generic terms like "Dispositivo." Processador usado para gerar texto. - - + + Application default Aplicativo padrão - + Default Model Modelo Padrão - + The preferred model for new chats. Also used as the local server fallback. Modelo padrão para novos chats e em caso de falha do modelo principal. - + Suggestion Mode Modo de sugestões - + Generate suggested follow-up questions at the end of responses. Sugerir perguntas após as respostas. - + When chatting with LocalDocs Ao conversar com o LocalDocs - + Whenever possible Sempre que possível - + Never Nunca - + Download Path Diretório de Download - + Where to store local models and the LocalDocs database. Pasta para modelos e banco de dados do LocalDocs. - + Browse Procurar - + Choose where to save model files Local para armazenar os modelos - + Enable Datalake Habilitar Datalake - + Send chats and feedback to the GPT4All Open-Source Datalake. Contribua para o Datalake de código aberto do GPT4All. - + Advanced Avançado - + CPU Threads Threads de CPU - + The number of CPU threads used for inference and embedding. Quantidade de núcleos (threads) do processador usados para processar e responder às suas perguntas. 
- + Save Chat Context I used "Histórico do Chat" (Chat History) instead of "Contexto do Chat" (Chat Context) to clearly convey that it refers to saving past messages, making it more intuitive and avoiding potential confusion with abstract terms. Salvar Histórico do Chat - + Save the chat model's state to disk for faster loading. WARNING: Uses ~2GB per chat. Salvar histórico do chat para carregamento mais rápido. (Usa aprox. 2GB por chat). - + Enable Local Server Ativar Servidor Local - + Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage. Ativar servidor local compatível com OpenAI (uso de recursos elevado). - + API Server Port Porta da API - + The port to use for the local server. Requires restart. Porta de acesso ao servidor local. (requer reinicialização). - + Check For Updates Procurar por Atualizações - + Manually check for an update to GPT4All. Verifica se há novas atualizações para o GPT4All. - + Updates Atualizações @@ -614,13 +614,13 @@ Chat - - + + New Chat Novo Chat - + Server Chat Chat com o Servidor @@ -628,12 +628,12 @@ ChatAPIWorker - + ERROR: Network error occurred while connecting to the API server ERRO: Ocorreu um erro de rede ao conectar-se ao servidor da API - + ChatAPIWorker::handleFinished got HTTP Error %1 %2 ChatAPIWorker::handleFinished recebeu erro HTTP %1 %2 @@ -641,62 +641,62 @@ ChatDrawer - + Drawer Menu Lateral - + Main navigation drawer Menu de navegação principal - + + New Chat + Novo Chat - + Create a new chat Criar um novo chat - + Select the current chat or edit the chat when in edit mode Selecione o chat atual ou edite o chat quando estiver no modo de edição - + Edit chat name Editar nome do chat - + Save chat name Salvar nome do chat - + Delete chat Excluir chat - + Confirm chat deletion Confirmar exclusão do chat - + Cancel chat deletion Cancelar exclusão do chat - + List of chats Lista de chats - + List of chats in the drawer dialog Lista de chats na caixa de diálogo do menu lateral 
@@ -704,32 +704,32 @@ ChatListModel - + TODAY HOJE - + THIS WEEK ESTA SEMANA - + THIS MONTH ESTE MÊS - + LAST SIX MONTHS ÚLTIMOS SEIS MESES - + THIS YEAR ESTE ANO - + LAST YEAR ANO PASSADO @@ -737,150 +737,150 @@ ChatView - + <h3>Warning</h3><p>%1</p> <h3>Aviso</h3><p>%1</p> - + Switch model dialog Mensagem ao troca de modelo - + Warn the user if they switch models, then context will be erased Ao trocar de modelo, o contexto da conversa será apagado - + Conversation copied to clipboard. Conversa copiada. - + Code copied to clipboard. Código copiado. - + Chat panel Painel de chat - + Chat panel with options Painel de chat com opções - + Reload the currently loaded model Recarregar modelo atual - + Eject the currently loaded model Ejetar o modelo carregado atualmente - + No model installed. Nenhum modelo instalado. - + Model loading error. Erro ao carregar o modelo. - + Waiting for model... Aguardando modelo... - + Switching context... Mudando de contexto... - + Choose a model... Escolha um modelo... - + Not found: %1 Não encontrado: %1 - + The top item is the current model O modelo atual é exibido no topo - - + + LocalDocs LocalDocs - + Add documents Adicionar documentos - + add collections of documents to the chat Adicionar Coleção de Documentos - + Load the default model Carregar o modelo padrão - + Loads the default model which can be changed in settings Carrega o modelo padrão (personalizável nas configurações) - + No Model Installed Nenhum Modelo Instalado - + GPT4All requires that you install at least one model to get started O GPT4All precisa de pelo menos um modelo modelo instalado para funcionar - + Install a Model Instalar um Modelo - + Shows the add model view Mostra a visualização para adicionar modelo - + Conversation with the model Conversa com o modelo - + prompt / response pairs from the conversation Pares de pergunta/resposta da conversa - + GPT4All GPT4All - + You Você @@ -889,139 +889,139 @@ modelo instalado para funcionar recalculando contexto... 
- + response stopped ... resposta interrompida... - + processing ... processando... - + generating response ... gerando resposta... - + generating questions ... gerando perguntas... - - + + Copy Copiar - + Copy Message Copiar Mensagem - + Disable markdown Desativar markdown - + Enable markdown Ativar markdown - + Thumbs up Resposta boa - + Gives a thumbs up to the response Curte a resposta - + Thumbs down Resposta ruim - + Opens thumbs down dialog Abrir diálogo de joinha para baixo - + Suggested follow-ups Perguntas relacionadas - + Erase and reset chat session Apagar e redefinir sessão de chat - + Copy chat session to clipboard Copiar histórico da conversa - + Redo last chat response Refazer última resposta - + Stop generating Parar de gerar - + Stop the current response generation Parar a geração da resposta atual - + Reloads the model Recarrega modelo - + <h3>Encountered an error loading model:</h3><br><i>"%1"</i><br><br>Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:<br><ul><li>Ensure the model file has a compatible format and type<li>Check the model file is complete in the download folder<li>You can find the download folder in the settings dialog<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum<li>Read more about what models are supported in our <a href="https://docs.gpt4all.io/">documentation</a> for the gui<li>Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help <h3>Ocorreu um erro ao carregar o modelo:</h3><br><i>"%1"</i><br><br>Falhas no carregamento do modelo podem acontecer por vários motivos, mas as causas mais comuns incluem um formato de arquivo incorreto, um download incompleto ou corrompido, o tipo de arquivo errado, memória RAM do sistema insuficiente ou um tipo de modelo incompatível. Aqui estão algumas sugestões para resolver o problema:<br><ul><li>Certifique-se de que o arquivo do modelo tenha um formato e tipo compatíveis<li>Verifique se o arquivo do modelo está completo na pasta de download<li>Você pode encontrar a pasta de download na caixa de diálogo de configurações<li>Se você carregou o modelo, certifique-se de que o arquivo não esteja corrompido verificando o md5sum<li>Leia mais sobre quais modelos são suportados em nossa <a href="https://docs.gpt4all.io/">documentação</a> para a interface gráfica<li>Confira nosso <a href="https://discord.gg/4M2QFmTt2k">canal do Discord</a> para obter ajuda - - + + Reload · %1 Recarregar · %1 - + Loading · %1 Carregando · %1 - + Load · %1 (default) → Carregar · %1 (padrão) → - + restoring from text ... Recuperando do texto... - + retrieving localdocs: %1 ... Recuperando dados em LocalDocs: %1 ... - + searching localdocs: %1 ... Buscando em LocalDocs: %1 ... - + %n Source(s) %n Origem @@ -1029,42 +1029,42 @@ modelo instalado para funcionar - + Send a message... Enviar uma mensagem... - + Load a model to continue... Carregue um modelo para continuar... 
- + Send messages/prompts to the model Enviar mensagens/prompts para o modelo - + Cut Recortar - + Paste Colar - + Select All Selecionar tudo - + Send message Enviar mensagem - + Sends the message/prompt contained in textfield to the model Envia a mensagem/prompt contida no campo de texto para o modelo @@ -1072,12 +1072,12 @@ modelo instalado para funcionar CollectionsDrawer - + Warning: searching collections while indexing can return incomplete results Aviso: pesquisar coleções durante a indexação pode retornar resultados incompletos - + %n file(s) %n arquivo(s) @@ -1085,7 +1085,7 @@ modelo instalado para funcionar - + %n word(s) %n palavra(s) @@ -1093,17 +1093,17 @@ modelo instalado para funcionar - + Updating Atualizando - + + Add Docs + Adicionar Documentos - + Select a collection to make it available to the chat model. Selecione uma coleção para disponibilizá-la ao modelo de chat. @@ -1111,37 +1111,37 @@ modelo instalado para funcionar Download - + Model "%1" is installed successfully. Modelo "%1" instalado com sucesso. - + ERROR: $MODEL_NAME is empty. ERRO: O nome do modelo ($MODEL_NAME) está vazio. - + ERROR: $API_KEY is empty. ERRO: A chave da API ($API_KEY) está vazia. - + ERROR: $BASE_URL is invalid. ERRO: A URL base ($BASE_URL) é inválida. - + ERROR: Model "%1 (%2)" is conflict. ERRO: Conflito com o modelo "%1 (%2)". - + Model "%1 (%2)" is installed successfully. Modelo "%1 (%2)" instalado com sucesso. - + Model "%1" is removed. Modelo "%1" removido. 
@@ -1149,92 +1149,92 @@ modelo instalado para funcionar HomeView - + Welcome to GPT4All Bem-vindo ao GPT4All - + The privacy-first LLM chat application O aplicativo de chat LLM que prioriza a privacidade - + Start chatting Iniciar chat - + Start Chatting Iniciar Chat - + Chat with any LLM Converse com qualquer LLM - + LocalDocs LocalDocs - + Chat with your local files Converse com seus arquivos locais - + Find Models Encontrar Modelos - + Explore and download models Descubra e baixe modelos - + Latest news Últimas novidades - + Latest news from GPT4All Últimas novidades do GPT4All - + Release Notes Notas de versão - + Documentation Documentação - + Discord Discord - + X (Twitter) X (Twitter) - + Github Github - + nomic.ai nomic.ai - + Subscribe to Newsletter Assine nossa Newsletter @@ -1242,118 +1242,118 @@ modelo instalado para funcionar LocalDocsSettings - + LocalDocs LocalDocs - + LocalDocs Settings Configurações do LocalDocs - + Indexing Indexação - + Allowed File Extensions Extensões de Arquivo Permitidas - + Comma-separated list. LocalDocs will only attempt to process files with these extensions. Lista separada por vírgulas. O LocalDocs tentará processar apenas arquivos com essas extensões. - + Embedding Incorporação - + Use Nomic Embed API Usar a API Nomic Embed - + Embed documents using the fast Nomic API instead of a private local model. Requires restart. Incorporar documentos usando a API Nomic rápida em vez de um modelo local privado. Requer reinicialização. - + Nomic API Key Chave da API Nomic - + API key to use for Nomic Embed. Get one from the Atlas <a href="https://atlas.nomic.ai/cli-login">API keys page</a>. Requires restart. Chave da API a ser usada para Nomic Embed. Obtenha uma na página de <a href="https://atlas.nomic.ai/cli-login">chaves de API do Atlas</a>. Requer reinicialização. - + Embeddings Device Processamento de Incorporações - + The compute device used for embeddings. Requires restart. Dispositivo usado para processar as incorporações. 
Requer reinicialização. - + Application default Aplicativo padrão - + Display Exibir - + Show Sources Mostrar Fontes - + Display the sources used for each response. Mostra as fontes usadas para cada resposta. - + Advanced Apenas para usuários avançados - + Warning: Advanced usage only. Atenção: Apenas para usuários avançados. - + Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. More info <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">here</a>. Valores muito altos podem causar falhas no LocalDocs, respostas extremamente lentas ou até mesmo nenhuma resposta. De forma geral, o valor {Número de Caracteres x Número de Trechos} é adicionado à janela de contexto do modelo. Clique <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">aqui</a> para mais informações. - + Document snippet size (characters) I translated "snippet" as "trecho" to make the term feel more natural and understandable in Portuguese. "Trecho" effectively conveys the idea of a portion or section of a document, fitting well within the context, whereas a more literal translation might sound less intuitive or awkward for users. Tamanho do trecho de documento (caracteres) - + Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation. Número de caracteres por trecho de documento. Valores maiores aumentam a chance de respostas factuais, mas também tornam a geração mais lenta. - + Max document snippets per prompt Máximo de Trechos de Documento por Prompt - + Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation. Número máximo de trechos de documentos a serem adicionados ao contexto do prompt. 
Valores maiores aumentam a chance de respostas factuais, mas também tornam a geração mais lenta. @@ -1361,17 +1361,17 @@ modelo instalado para funcionar LocalDocsView - + LocalDocs LocalDocs - + Chat with your local files Converse com seus arquivos locais - + + Add Collection + Adicionar Coleção @@ -1380,102 +1380,102 @@ modelo instalado para funcionar ERRO: O banco de dados do LocalDocs não é válido. - + <h3>ERROR: The LocalDocs database cannot be accessed or is not valid.</h3><br><i>Note: You will need to restart after trying any of the following suggested fixes.</i><br><ul><li>Make sure that the folder set as <b>Download Path</b> exists on the file system.</li><li>Check ownership as well as read and write permissions of the <b>Download Path</b>.</li><li>If there is a <b>localdocs_v2.db</b> file, check its ownership and read/write permissions, too.</li></ul><br>If the problem persists and there are any 'localdocs_v*.db' files present, as a last resort you can<br>try backing them up and removing them. You will have to recreate your collections, however. <h3>ERRO: Não foi possível acessar o banco de dados do LocalDocs ou ele não é válido.</h3><br><i>Observação: Será necessário reiniciar o aplicativo após tentar qualquer uma das seguintes correções sugeridas.</i><br><ul><li>Certifique-se de que a pasta definida como <b>Caminho de Download</b> existe no sistema de arquivos.</li><li>Verifique a propriedade, bem como as permissões de leitura e gravação do <b>Caminho de Download</b>.</li><li>Se houver um arquivo <b>localdocs_v2.db</b>, verifique também sua propriedade e permissões de leitura/gravação.</li></ul><br>Se o problema persistir e houver algum arquivo 'localdocs_v*.db' presente, como último recurso, você pode<br>tentar fazer backup deles e removê-los. No entanto, você terá que recriar suas coleções. 
- + No Collections Installed Nenhuma Coleção Instalada - + Install a collection of local documents to get started using this feature Instale uma coleção de documentos locais para começar a usar este recurso - + + Add Doc Collection + Adicionar Coleção de Documentos - + Shows the add model view Mostra a visualização para adicionar modelo - + Indexing progressBar Barra de progresso de indexação - + Shows the progress made in the indexing Mostra o progresso da indexação - + ERROR ERRO - + INDEXING INDEXANDO - + EMBEDDING INCORPORANDO - + REQUIRES UPDATE REQUER ATUALIZAÇÃO - + READY PRONTO - + INSTALLING INSTALANDO - + Indexing in progress Indexação em andamento - + Embedding in progress Incorporação em andamento - + This collection requires an update after version change Esta coleção precisa ser atualizada após a mudança de versão - + Automatically reindexes upon changes to the folder Reindexa automaticamente após alterações na pasta - + Installation in progress Instalação em andamento - + % % - + %n file(s) %n arquivo(s) @@ -1483,7 +1483,7 @@ modelo instalado para funcionar - + %n word(s) %n palavra(s) @@ -1491,27 +1491,27 @@ modelo instalado para funcionar - + Remove Remover - + Rebuild Reconstruir - + Reindex this folder from scratch. This is slow and usually not needed. Reindexar pasta do zero. Lento e geralmente desnecessário. - + Update Atualizar - + Update the collection to the new version. This is a slow operation. Atualizar coleção para nova versão. Pode demorar. 
@@ -1519,67 +1519,78 @@ modelo instalado para funcionar ModelList - + <ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with OpenAI</li><li>You can apply for an API key <a href="https://platform.openai.com/account/api-keys">here.</a></li> <ul><li>É necessária uma chave de API da OpenAI.</li><li>AVISO: Seus chats serão enviados para a OpenAI!</li><li>Sua chave de API será armazenada localmente</li><li>Ela será usada apenas para comunicação com a OpenAI</li><li>Você pode solicitar uma chave de API <a href="https://platform.openai.com/account/api-keys">aqui.</a></li> - + <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1 <strong>Modelo ChatGPT GPT-3.5 Turbo da OpenAI</strong><br> %1 - + <strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2 <strong>Modelo ChatGPT GPT-4 da OpenAI</strong><br> %1 %2 - + <strong>Mistral Tiny model</strong><br> %1 <strong>Modelo Mistral Tiny</strong><br> %1 - + <strong>Mistral Small model</strong><br> %1 <strong>Modelo Mistral Small</strong><br> %1 - + <strong>Mistral Medium model</strong><br> %1 <strong>Modelo Mistral Medium</strong><br> %1 - + <br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info. <br><br><i>* Mesmo que você pague pelo ChatGPT-4 da OpenAI, isso não garante acesso à chave de API. Contate a OpenAI para mais informações. 
- + + + cannot open "%1": %2 + + + + + cannot create "%1": %2 + + + + %1 (%2) %1 (%2) - + <strong>OpenAI-Compatible API Model</strong><br><ul><li>API Key: %1</li><li>Base URL: %2</li><li>Model Name: %3</li></ul> <strong>Modelo de API Compatível com OpenAI</strong><br><ul><li>Chave da API: %1</li><li>URL Base: %2</li><li>Nome do Modelo: %3</li></ul> - + <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> <ul><li>É necessária uma chave de API da Mistral.</li><li>AVISO: Seus chats serão enviados para a Mistral!</li><li>Sua chave de API será armazenada localmente</li><li>Ela será usada apenas para comunicação com a Mistral</li><li>Você pode solicitar uma chave de API <a href="https://console.mistral.ai/user/api-keys">aqui</a>.</li> - + <ul><li>Requires personal API key and the API base URL.</li><li>WARNING: Will send your chats to the OpenAI-compatible API Server you specified!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with the OpenAI-compatible API Server</li> <ul><li>É necessária uma chave de API e a URL da API.</li><li>AVISO: Seus chats serão enviados para o servidor de API compatível com OpenAI que você especificou!</li><li>Sua chave de API será armazenada no disco</li><li>Será usada apenas para comunicação com o servidor de API compatível com OpenAI</li> - + <strong>Connect to OpenAI-compatible API server</strong><br> %1 <strong>Conectar a um servidor de API compatível com OpenAI</strong><br> %1 - + <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> <strong>Criado por %1.</strong><br><ul><li>Publicado em %2.<li>Este modelo tem %3 
curtidas.<li>Este modelo tem %4 downloads.<li>Mais informações podem ser encontradas <a href="https://huggingface.co/%5">aqui.</a></ul> @@ -1587,92 +1598,92 @@ modelo instalado para funcionar ModelSettings - + Model Modelo - + Model Settings Configurações do Modelo - + Clone Clonar - + Remove Remover - + Name Nome - + Model File Arquivo do Modelo - + System Prompt Prompt do Sistema - + Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens. Prefixado no início de cada conversa. Deve conter os tokens de enquadramento apropriados. - + Prompt Template Modelo de Prompt - + The template that wraps every prompt. Modelo para cada prompt. - + Must contain the string "%1" to be replaced with the user's input. Deve incluir "%1" para a entrada do usuário. - + Chat Name Prompt Prompt para Nome do Chat - + Prompt used to automatically generate chat names. Prompt usado para gerar automaticamente nomes de chats. - + Suggested FollowUp Prompt Prompt de Sugestão de Acompanhamento - + Prompt used to generate suggested follow-up questions. Prompt usado para gerar sugestões de perguntas. - + Context Length Tamanho do Contexto - + Number of input and output tokens the model sees. Tamanho da Janela de Contexto. - + Maximum combined prompt/response tokens before information is lost. Using more context than the model was trained on will yield poor results. NOTE: Does not take effect until you reload the model. @@ -1681,128 +1692,128 @@ Usar mais contexto do que o modelo foi treinado pode gerar resultados ruins. Obs.: Só entrará em vigor após recarregar o modelo. - + Temperature Temperatura - + Randomness of model output. Higher -> more variation. Aleatoriedade das respostas. Quanto maior, mais variadas. - + Temperature increases the chances of choosing less likely tokens. NOTE: Higher temperature gives more creative but less predictable outputs. Aumenta a chance de escolher tokens menos prováveis. 
Obs.: Uma temperatura mais alta gera resultados mais criativos, mas menos previsíveis. - + Top-P Top-P - + Nucleus Sampling factor. Lower -> more predictable. Amostragem por núcleo. Menor valor, respostas mais previsíveis. - + Only the most likely tokens up to a total probability of top_p can be chosen. NOTE: Prevents choosing highly unlikely tokens. Apenas tokens com probabilidade total até o valor de top_p serão escolhidos. Obs.: Evita tokens muito improváveis. - + Min-P Min-P - + Minimum token probability. Higher -> more predictable. Probabilidade mínima do token. Quanto maior -> mais previsível. - + Sets the minimum relative probability for a token to be considered. Define a probabilidade relativa mínima para um token ser considerado. - + Top-K Top-K - + Size of selection pool for tokens. Número de tokens considerados na amostragem. - + Only the top K most likely tokens will be chosen from. Serão escolhidos apenas os K tokens mais prováveis. - + Max Length Comprimento Máximo - + Maximum response length, in tokens. Comprimento máximo da resposta, em tokens. - + Prompt Batch Size Tamanho do Lote de Processamento - + The batch size used for prompt processing. Tokens processados por lote. - + Amount of prompt tokens to process at once. NOTE: Higher values can speed up reading prompts but will use more RAM. Quantidade de tokens de prompt para processar de uma vez. OBS.: Valores mais altos podem acelerar a leitura dos prompts, mas usarão mais RAM. - + Repeat Penalty Penalidade de Repetição - + Repetition penalty factor. Set to 1 to disable. Penalidade de Repetição (1 para desativar). - + Repeat Penalty Tokens Tokens para penalizar repetição - + Number of previous tokens used for penalty. Número de tokens anteriores usados para penalidade. - + GPU Layers Camadas na GPU - + Number of model layers to load into VRAM. Camadas Carregadas na GPU. - + How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model. 
Lower values increase CPU load and RAM usage, and make inference slower. NOTE: Does not take effect until you reload the model. @@ -1814,217 +1825,217 @@ Obs.: Só entrará em vigor após recarregar o modelo. ModelsView - + No Models Installed Nenhum Modelo Instalado - + Install a model to get started using GPT4All Instale um modelo para começar a usar o GPT4All - - + + + Add Model + Adicionar Modelo - + Shows the add model view Mostra a visualização para adicionar modelo - + Installed Models Modelos Instalados - + Locally installed chat models Modelos de chat instalados localmente - + Model file Arquivo do modelo - + Model file to be downloaded Arquivo do modelo a ser baixado - + Description Descrição - + File description Descrição do arquivo - + Cancel Cancelar - + Resume Retomar - + Stop/restart/start the download Parar/reiniciar/iniciar o download - + Remove Remover - + Remove model from filesystem Remover modelo do sistema de arquivos - - + + Install Instalar - + Install online model Instalar modelo online - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">Erro</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">AVISO: Não recomendado para seu hardware. O modelo requer mais memória (%1 GB) do que seu sistema tem disponível (%2).</strong></font> - + ERROR: $API_KEY is empty. ERRO: A $API_KEY está vazia. - + ERROR: $BASE_URL is empty. ERRO: A $BASE_URL está vazia. - + enter $BASE_URL inserir a $BASE_URL - + ERROR: $MODEL_NAME is empty. ERRO: O $MODEL_NAME está vazio. - + enter $MODEL_NAME inserir o $MODEL_NAME - + %1 GB %1 GB - + ? ? 
- + Describes an error that occurred when downloading Descreve um erro que ocorreu durante o download - + Error for incompatible hardware Erro para hardware incompatível - + Download progressBar Barra de progresso do download - + Shows the progress made in the download Mostra o progresso do download - + Download speed Velocidade de download - + Download speed in bytes/kilobytes/megabytes per second Velocidade de download em bytes/kilobytes/megabytes por segundo - + Calculating... Calculando... - - - - + + + + Whether the file hash is being calculated Se o hash do arquivo está sendo calculado - + Busy indicator Indicador de ocupado - + Displayed when the file hash is being calculated Exibido quando o hash do arquivo está sendo calculado - + enter $API_KEY inserir $API_KEY - + File size Tamanho do arquivo - + RAM required RAM necessária - + Parameters Parâmetros - + Quant Quant - + Type Tipo @@ -2032,12 +2043,12 @@ Obs.: Só entrará em vigor após recarregar o modelo. MyFancyLink - + Fancy link Link personalizado - + A stylized link Um link personalizado @@ -2045,7 +2056,7 @@ Obs.: Só entrará em vigor após recarregar o modelo. MySettingsStack - + Please choose a directory Escolha um diretório @@ -2053,12 +2064,12 @@ Obs.: Só entrará em vigor após recarregar o modelo. MySettingsTab - + Restore Defaults Restaurar Configurações Padrão - + Restores settings dialog to a default state Restaura as configurações para o estado padrão @@ -2066,12 +2077,12 @@ Obs.: Só entrará em vigor após recarregar o modelo. NetworkDialog - + Contribute data to the GPT4All Opensource Datalake. Contribuir com dados para o Datalake de código aberto GPT4All. - + By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. 
Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake. @@ -2084,47 +2095,47 @@ Quando um modelo GPT4All responder a você e você tiver optado por participar, OBS.: Ao ativar este recurso, você estará enviando seus dados para o Datalake de Código Aberto do GPT4All. Você não deve ter nenhuma expectativa de privacidade no chat quando este recurso estiver ativado. No entanto, você deve ter a expectativa de uma atribuição opcional, se desejar. Seus dados de chat estarão disponíveis para qualquer pessoa baixar e serão usados pela Nomic AI para melhorar os futuros modelos GPT4All. A Nomic AI manterá todas as informações de atribuição anexadas aos seus dados e você será creditado como colaborador em qualquer versão do modelo GPT4All que utilize seus dados! - + Terms for opt-in Termos de participação - + Describes what will happen when you opt-in Descrição do que acontece ao participar - + Please provide a name for attribution (optional) Forneça um nome para atribuição (opcional) - + Attribution (optional) Atribuição (opcional) - + Provide attribution Fornecer atribuição - + Enable Habilitar - + Enable opt-in Ativar participação - + Cancel Cancelar - + Cancel opt-in Cancelar participação @@ -2132,17 +2143,17 @@ OBS.: Ao ativar este recurso, você estará enviando seus dados para o Datalake NewVersionDialog - + New version is available Atualização disponível - + Update Atualizar agora - + Update to new version Baixa e instala a última versão do GPT4All @@ -2150,18 +2161,18 @@ OBS.: Ao ativar este recurso, você estará enviando seus dados para o Datalake PopupDialog - + Reveals a shortlived help balloon Exibe uma dica rápida - + Busy indicator The literal translation of "busy indicator" as "indicador de ocupado" might create ambiguity in Portuguese, as it doesn't clearly convey whether the system is processing something or simply unavailable. 
"Progresso" (progress) was chosen to more clearly indicate that an activity is in progress and that the user should wait for its completion. Indicador de progresso - + Displayed when the popup is showing busy Visível durante o processamento @@ -2176,29 +2187,29 @@ OBS.: Ao ativar este recurso, você estará enviando seus dados para o Datalake SettingsView - - + + Settings I used "Config" instead of "Configurações" to keep the UI concise and visually balanced. "Config" is a widely recognized abbreviation that maintains clarity while saving space, making the interface cleaner and more user-friendly, especially in areas with limited space. Config - + Contains various application settings Acessar as configurações do aplicativo - + Application Aplicativo - + Model Modelo - + LocalDocs LocalDocs @@ -2206,12 +2217,12 @@ OBS.: Ao ativar este recurso, você estará enviando seus dados para o Datalake StartupDialog - + Welcome! Bem-vindo(a)! - + ### Release notes %1### Contributors %2 @@ -2220,17 +2231,17 @@ OBS.: Ao ativar este recurso, você estará enviando seus dados para o Datalake %2 - + Release notes Notas de lançamento - + Release notes for this version Notas de lançamento desta versão - + ### Opt-ins for anonymous usage analytics and datalake By enabling these features, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. @@ -2261,71 +2272,71 @@ todas as informações de atribuição anexadas aos seus dados e você será cre versão do modelo GPT4All que utilize seus dados! 
- + Terms for opt-in Termos de participação - + Describes what will happen when you opt-in Descrição do que acontece ao participar - - + + Opt-in for anonymous usage statistics Enviar estatísticas de uso anônimas - - + + Yes Sim - + Allow opt-in for anonymous usage statistics Permitir o envio de estatísticas de uso anônimas - - + + No Não - + Opt-out for anonymous usage statistics Recusar envio de estatísticas de uso anônimas - + Allow opt-out for anonymous usage statistics Permitir recusar envio de estatísticas de uso anônimas - - + + Opt-in for network Aceitar na rede - + Allow opt-in for network Permitir aceitação na rede - + Allow opt-in anonymous sharing of chats to the GPT4All Datalake Permitir compartilhamento anônimo de chats no Datalake GPT4All - + Opt-out for network Recusar na rede - + Allow opt-out anonymous sharing of chats to the GPT4All Datalake Permitir recusar compartilhamento anônimo de chats no Datalake GPT4All @@ -2333,23 +2344,23 @@ versão do modelo GPT4All que utilize seus dados! SwitchModelDialog - + <b>Warning:</b> changing the model will erase the current conversation. Do you wish to continue? <b>Atenção:</b> Ao trocar o modelo a conversa atual será perdida. Continuar? - + Continue Continuar - + Continue with model loading Confirma a troca do modelo - - + + Cancel Cancelar @@ -2357,32 +2368,32 @@ versão do modelo GPT4All que utilize seus dados! ThumbsDownDialog - + Please edit the text below to provide a better response. (optional) Editar resposta (opcional) - + Please provide a better response... Digite sua resposta... - + Submit Enviar - + Submits the user's response Enviar - + Cancel Cancelar - + Closes the response dialog Fecha a caixa de diálogo de resposta @@ -2390,125 +2401,125 @@ versão do modelo GPT4All que utilize seus dados! main - + <h3>Encountered an error starting up:</h3><br><i>"Incompatible hardware detected."</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run this program. 
In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.<br><br>See here for more information: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> <h3>Ocorreu um erro ao iniciar:</h3><br><i>"Hardware incompatível detectado."</i><br><br>Infelizmente, seu processador não atende aos requisitos mínimos para executar este programa. Especificamente, ele não possui suporte às instruções AVX, que são necessárias para executar modelos de linguagem grandes e modernos. A única solução, no momento, é atualizar seu hardware para um processador mais recente.<br><br>Para mais informações, consulte: <a href="https://pt.wikipedia.org/wiki/Advanced_Vector_Extensions">https://pt.wikipedia.org/wiki/Advanced_Vector_Extensions</a> - + GPT4All v%1 GPT4All v%1 - + <h3>Encountered an error starting up:</h3><br><i>"Inability to access settings file."</i><br><br>Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help. <h3>Ocorreu um erro ao iniciar:</h3><br><i>"Não foi possível acessar o arquivo de configurações."</i><br><br>Infelizmente, algo está impedindo o programa de acessar o arquivo de configurações. Isso pode acontecer devido a permissões incorretas na pasta de configurações do aplicativo. Para obter ajuda, acesse nosso <a href="https://discord.gg/4M2QFmTt2k">canal no Discord</a>. - + Connection to datalake failed. Falha na conexão com o datalake. - + Saving chats. Salvando chats. 
- + Network dialog Avisos de rede - + opt-in to share feedback/conversations permitir compartilhamento de feedback/conversas - + Home view Tela inicial - + Home view of application Tela inicial do aplicativo - + Home Início - + Chat view Visualização do Chat - + Chat view to interact with models Visualização do chat para interagir com os modelos - + Chats Chats - - + + Models Modelos - + Models view for installed models Tela de modelos instalados - - + + LocalDocs LocalDocs - + LocalDocs view to configure and use local docs Tela de configuração e uso de documentos locais do LocalDocs - - + + Settings Config - + Settings view for application configuration Tela de configurações do aplicativo - + The datalake is enabled O datalake está ativado - + Using a network model Usando um modelo de rede - + Server mode is enabled Modo servidor ativado - + Installed models Modelos instalados - + View of installed models Exibe os modelos instalados diff --git a/gpt4all-chat/translations/gpt4all_ro_RO.ts b/gpt4all-chat/translations/gpt4all_ro_RO.ts index 703e5b140210..a1eefb889c12 100644 --- a/gpt4all-chat/translations/gpt4all_ro_RO.ts +++ b/gpt4all-chat/translations/gpt4all_ro_RO.ts @@ -4,12 +4,12 @@ AddCollectionView - + ← Existing Collections ← Colecţiile curente - + Add Document Collection Adaugă o Colecţie de documente @@ -19,52 +19,52 @@ Adaugă un folder care conţine fişiere în cu text-simplu, PDF sau Markdown. Extensii suplimentare pot fi specificate în Configurare. - + Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings. Adaugă un folder cu fişiere în format text, PDF sau Markdown. Alte extensii pot fi specificate în Configurare. - + Please choose a directory Selectează un director/folder - + Name Denumire - + Collection name... Denumirea Colecţiei... - + Name of the collection to add (Required) Denumirea Colecţiei de adăugat (necesar) - + Folder Folder - + Folder path... Calea spre folder... 
- + Folder path to documents (Required) Calea spre documente (necesar) - + Browse Căutare - + Create Collection Creează Colecţia @@ -72,174 +72,174 @@ AddModelView - + ← Existing Models ← Modelele curente/instalate - + Explore Models Caută modele - + Discover and download models by keyword search... Caută şi descarcă modele după un cuvânt-cheie... - + Text field for discovering and filtering downloadable models Câmp pentru căutarea şi filtrarea modelelor ce pot fi descărcate - + Initiate model discovery and filtering Iniţiază căutarea şi filtrarea modelelor - + Triggers discovery and filtering of models Activează căutarea şi filtrarea modelelor - + Default Implicit - + Likes Likes - + Downloads Download-uri - + Recent Recent/e - + Asc Asc. (A->Z) - + Desc Desc. (Z->A) - + None Niciunul - + Searching · %1 Căutare · %1 - + Sort by: %1 Ordonare după: %1 - + Sort dir: %1 Sensul ordonării: %1 - + Limit: %1 Límită: %1 - + Network error: could not retrieve %1 Eroare de reţea: nu se poate prelua %1 - - + + Busy indicator Indicator de activitate - + Displayed when the models request is ongoing Afişat în timpul solicitării modelului - + Model file Fişierul modelului - + Model file to be downloaded Fişierul modelului de descărcat - + Description Descriere - + File description Descrierea fişierului - + Cancel Anulare - + Resume Continuare - + Download Download - + Stop/restart/start the download Opreşte/Reporneşte/Începe descărcarea - + Remove Şterge - + Remove model from filesystem Şterge modelul din sistemul de fişiere - - + + Install Instalare - + Install online model Instalez un model din online - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">Eroare</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">ATENŢIE: Nerecomandat pentru acest hardware. 
Modelul necesită mai multă memorie (%1 GB) decât are acest sistem (%2).</strong></font> @@ -250,18 +250,18 @@ <strong><font size="2">ATENţIE: Nerecomandat pentru acest hardware. Modelul necesită mai multă memorie (%1 GB) decât are acest sistem (%2).</strong></font> - + %1 GB %1 GB - - + + ? ? - + Describes an error that occurred when downloading Descrie eroarea apărută în timpul descărcării @@ -271,100 +271,100 @@ <strong><font size="1"><a href="#eroare">Eroare</a></strong></font> - + Error for incompatible hardware Eroare: hardware incompatibil - + Download progressBar Progresia descărcării - + Shows the progress made in the download Afişează progresia descărcării - + Download speed Viteza de download - + Download speed in bytes/kilobytes/megabytes per second Viteza de download în bytes/kilobytes/megabytes pe secundă - + Calculating... Calculare... - - - - + + + + Whether the file hash is being calculated Dacă se calculează hash-ul fişierului - + Displayed when the file hash is being calculated Se afişează când se calculează hash-ul fişierului - + ERROR: $API_KEY is empty. EROARE: $API_KEY absentă - + enter $API_KEY introdu cheia $API_KEY - + ERROR: $BASE_URL is empty. EROARE: $BASE_URL absentă - + enter $BASE_URL introdu $BASE_URL - + ERROR: $MODEL_NAME is empty. EROARE: $MODEL_NAME absent - + enter $MODEL_NAME introdu $MODEL_NAME - + File size Dimensiunea fişierului - + RAM required RAM necesară - + Parameters Parametri - + Quant Quant(ificare) - + Type Tip @@ -372,17 +372,17 @@ ApplicationSettings - + Application Aplicaţie/Program - + Network dialog Reţea - + opt-in to share feedback/conversations optional: partajarea (share) de comentarii/conversatii @@ -398,57 +398,57 @@ EROARE: Sistemul de actualizare nu poate găsi componenta MaintenanceTool<br> necesară căutării de versiuni noi!<br><br> Ai instalat acest program folosind kitul online? 
Dacă da,<br> atunci MaintenanceTool trebuie să fie un nivel mai sus de folderul<br> unde ai instalat programul.<br><br> Dacă nu poate fi lansată manual, atunci programul trebuie reinstalat. - + Error dialog Eroare - + Application Settings Configurarea programului - + General General - + Theme Tema pentru interfaţă - + The application color scheme. Schema de culori a programului. - + Dark Întunecat - + Light Luminos - + LegacyDark Întunecat-vechi - + Font Size Dimensiunea textului - + The size of text in the application. Dimensiunea textului în program. - + Device Dispozitiv/Device @@ -458,7 +458,7 @@ Dispozitivul de calcul utilizat pentru generarea de text. "Auto" apelează la Vulkan sau la Metal. - + ERROR: Update system could not find the MaintenanceTool used<br> to check for updates!<br><br> Did you install this application using the online installer? If so,<br> @@ -469,138 +469,138 @@ EROARE: Sistemul de Update nu poate găsi componenta MaintenanceTool<br> necesară căutării de versiuni noi!<br><br> Ai instalat acest program folosind kitul online? Dacă da,<br> atunci MaintenanceTool trebuie să fie un nivel mai sus de folderul<br> unde ai instalat programul.<br><br> Dacă nu poate fi lansată manual, atunci programul trebuie reinstalat. - + Small Mic - + Medium Mediu - + Large Mare - + Language and Locale Limbă şi Localizare - + The language and locale you wish to use. Limba şi Localizarea de utilizat. - + System Locale Localizare - + The compute device used for text generation. Dispozitivul de calcul utilizat pentru generarea de text. - - + + Application default Implicit - + Default Model Modelul implicit - + The preferred model for new chats. Also used as the local server fallback. Modelul preferat pentru noile conversaţii. Va fi folosit drept rezervă pentru serverul local. - + Suggestion Mode Modul de sugerare - + Generate suggested follow-up questions at the end of responses. Generarea de întrebări pentru continuare, la finalul replicilor. 
- + When chatting with LocalDocs Când se discută cu LocalDocs - + Whenever possible Oricând e posibil - + Never Niciodată - + Download Path Calea pentru download - + Where to store local models and the LocalDocs database. Unde să fie plasate modelele şi baza de date LocalDocs. - + Browse Căutare - + Choose where to save model files Selectează locul unde vor fi plasate fişierele modelelor - + Enable Datalake Activează DataLake - + Send chats and feedback to the GPT4All Open-Source Datalake. Trimite conversaţii şi comentarii către componenta Open-source DataLake a GPT4All. - + Advanced Avansate - + CPU Threads Thread-uri CPU - + The number of CPU threads used for inference and embedding. Numărul de thread-uri CPU utilizate pentru inferenţă şi embedding. - + Save Chat Context Salvarea contextului conversaţiei - + Save the chat model's state to disk for faster loading. WARNING: Uses ~2GB per chat. Salvează pe disc starea modelului pentru încărcare mai rapidă. ATENŢIE: Consumă ~2GB/conversaţie. - + Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage. Activează pe localhost un Server compatibil cu Open-AI. ATENŢIE: Creşte consumul de resurse. @@ -610,7 +610,7 @@ Salvează pe disc starea modelului pentru încărcare mai rapidă. ATENŢIE: Consumă ~2GB/conversaţie. - + Enable Local Server Activează Serverul local @@ -620,27 +620,27 @@ Activează pe localhost un Server compatibil cu Open-AI. ATENŢIE: Creşte consumul de resurse. - + API Server Port Portul Serverului API - + The port to use for the local server. Requires restart. Portul utilizat pentru Serverul local. Necesită repornirea programului. - + Check For Updates Caută update-uri - + Manually check for an update to GPT4All. Caută manual update-uri pentru GPT4All. 
- + Updates Update-uri/Actualizări @@ -648,13 +648,13 @@ Chat - - + + New Chat Conversaţie Nouă - + Server Chat Conversaţie cu Serverul @@ -662,12 +662,12 @@ ChatAPIWorker - + ERROR: Network error occurred while connecting to the API server EROARE: Eroare de reţea - conectarea la serverul API - + ChatAPIWorker::handleFinished got HTTP Error %1 %2 ChatAPIWorker::handleFinished - eroare: HTTP Error %1 %2 @@ -675,62 +675,62 @@ ChatDrawer - + Drawer Sertar - + Main navigation drawer Sertarul principal de navigare - + + New Chat + Conversaţie nouă - + Create a new chat Creează o Conversaţie nouă - + Select the current chat or edit the chat when in edit mode Selectează conversaţia curentă sau editeaz-o când eşti în modul editare - + Edit chat name Editează denumirea conversaţiei - + Save chat name Salvează denumirea conversaţiei - + Delete chat Şterge conversaţia - + Confirm chat deletion CONFIRMĂ ştergerea conversaţiei - + Cancel chat deletion ANULEAZĂ ştergerea conversaţiei - + List of chats Lista conversaţiilor - + List of chats in the drawer dialog Lista conversaţiilor în secţiunea-sertar @@ -738,32 +738,32 @@ ChatListModel - + TODAY ASTĂZI - + THIS WEEK SĂPTĂMÂNA ACEASTA - + THIS MONTH LUNA ACEASTA - + LAST SIX MONTHS ULTIMELE ŞASE LUNI - + THIS YEAR ANUL ACESTA - + LAST YEAR ANUL TRECUT @@ -771,113 +771,113 @@ ChatView - + <h3>Warning</h3><p>%1</p> <h3>Atenţie</h3><p>%1</p> - + Switch model dialog Schimbarea modelului - + Warn the user if they switch models, then context will be erased Avertizează utilizatorul că la schimbarea modelului va fi şters contextul - + Conversation copied to clipboard. Conversaţia a fost plasată în Clipboard. - + Code copied to clipboard. Codul a fost plasat în Clipboard. - + Chat panel Secţiunea de chat - + Chat panel with options Secţiunea de chat cu opţiuni - + Reload the currently loaded model Reîncarcă modelul curent - + Eject the currently loaded model Ejectează modelul curent - + No model installed. Niciun model instalat. 
- + Model loading error. Eroare la încărcarea modelului. - + Waiting for model... Se aşteaptă modelul... - + Switching context... Se schimbă contextul... - + Choose a model... Selectează un model... - + Not found: %1 Absent: %1 - + The top item is the current model Primul element e modelul curent - - + + LocalDocs LocalDocs - + Add documents Adaug documente - + add collections of documents to the chat adaugă Colecţii de documente la conversaţie - + Load the default model Încarcă modelul implicit - + Loads the default model which can be changed in settings Încarcă modelul implicit care poate fi stabilit în Configurare - + No Model Installed Niciun model instalat @@ -887,7 +887,7 @@ GPT4All necesită cel puţin un model pentru a putea porni - + <h3>Encountered an error loading model:</h3><br><i>"%1"</i><br><br>Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:<br><ul><li>Ensure the model file has a compatible format and type<li>Check the model file is complete in the download folder<li>You can find the download folder in the settings dialog<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum<li>Read more about what models are supported in our <a href="https://docs.gpt4all.io/">documentation</a> for the gui<li>Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help <h3>EROARE la încărcarea modelului:</h3><br><i>"%1"</i><br><br>Astfel @@ -906,38 +906,38 @@ se oferă ajutor - + GPT4All requires that you install at least one model to get started GPT4All necesită cel puţin un model pentru a putea rula - + Install a Model Instalează un model - + Shows the add model view Afişează secţiunea de adăugare a unui model - + Conversation with the model Conversaţie cu modelul - + prompt / response pairs from the conversation perechi prompt/replică din conversaţie - + GPT4All GPT4All - + You Tu @@ -946,98 +946,98 @@ model to get started se recalculează contextul... - + response stopped ... replică întreruptă... - + processing ... procesare... - + generating response ... se generează replica... - + generating questions ... se generează întrebări... 
- - + + Copy Copiere - + Copy Message Copiez mesajul - + Disable markdown Dezactivez markdown - + Enable markdown Activez markdown - + Thumbs up Bravo - + Gives a thumbs up to the response Dă un Bravo acestei replici - + Thumbs down Aiurea - + Opens thumbs down dialog Deschide reacţia Aiurea - + Suggested follow-ups Continuări sugerate - + Erase and reset chat session Şterge şi resetează sesiunea de chat - + Copy chat session to clipboard Copiez sesiunea de chat (conversaţia) în Clipboard - + Redo last chat response Reface ultima replică - + Stop generating Opreşte generarea - + Stop the current response generation Opreşte generarea replicii curente - + Reloads the model Reîncarc modelul @@ -1072,38 +1072,38 @@ model to get started se oferă ajutor - - + + Reload · %1 Reîncărcare · %1 - + Loading · %1 Încărcare · %1 - + Load · %1 (default) → Încarcă · %1 (implicit) → - + restoring from text ... restaurare din text... - + retrieving localdocs: %1 ... se preia din LocalDocs: %1 ... - + searching localdocs: %1 ... se caută în LocalDocs: %1 ... - + %n Source(s) %n Sursa @@ -1112,42 +1112,42 @@ model to get started - + Send a message... Trimite un mesaj... - + Load a model to continue... Încarcă un model pentru a continua... 
- + Send messages/prompts to the model Trimite mesaje/prompt-uri către model - + Cut Decupare (Cut) - + Paste Alipire (Paste) - + Select All Selectez tot - + Send message Trimit mesajul - + Sends the message/prompt contained in textfield to the model Trimite modelului mesajul/prompt-ul din câmpul-text @@ -1155,12 +1155,12 @@ model to get started CollectionsDrawer - + Warning: searching collections while indexing can return incomplete results Atenţie: căutarea în Colecţii în timp ce sunt Indexate poate întoarce rezultate incomplete - + %n file(s) %n fişier @@ -1169,7 +1169,7 @@ model to get started - + %n word(s) %n cuvânt @@ -1178,17 +1178,17 @@ model to get started - + Updating Actualizare - + + Add Docs + Adaug documente - + Select a collection to make it available to the chat model. Selectează o Colecţie pentru ca modelul să o poată accesa. @@ -1196,37 +1196,37 @@ model to get started Download - + Model "%1" is installed successfully. Modelul "%1" - instalat cu succes. - + ERROR: $MODEL_NAME is empty. EROARE: $MODEL_NAME absent. - + ERROR: $API_KEY is empty. EROARE: $API_KEY absentă - + ERROR: $BASE_URL is invalid. EROARE: $API_KEY incorecta - + ERROR: Model "%1 (%2)" is conflict. EROARE: Model "%1 (%2)" conflictual. - + Model "%1 (%2)" is installed successfully. Modelul "%1 (%2)" - instalat cu succes. - + Model "%1" is removed. 
Modelul "%1" - îndepărtat @@ -1234,87 +1234,87 @@ model to get started HomeView - + Welcome to GPT4All Bun venit în GPT4All - + The privacy-first LLM chat application Programul ce prioritizează confidenţialitatea (privacy) - + Start chatting Începe o conversaţie - + Start Chatting Începe o conversaţie - + Chat with any LLM Dialoghează cu orice LLM - + LocalDocs LocalDocs - + Chat with your local files Dialoghează cu fişiere locale - + Find Models Caută modele - + Explore and download models Explorează şi descarcă modele - + Latest news Ultimele ştiri - + Latest news from GPT4All Ultimele ştiri de la GPT4All - + Release Notes Despre această versiune - + Documentation Documentaţie - + Discord Discord - + X (Twitter) X (Twitter) - + Github GitHub - + nomic.ai nomic.ai @@ -1323,7 +1323,7 @@ model to get started GitHub - + Subscribe to Newsletter Abonare la Newsletter @@ -1331,22 +1331,22 @@ model to get started LocalDocsSettings - + LocalDocs LocalDocs - + LocalDocs Settings Configurarea LocalDocs - + Indexing Indexare - + Allowed File Extensions Extensii compatibile de fişier @@ -1356,12 +1356,12 @@ model to get started Extensiile, separate prin virgulă. LocalDocs va încerca procesarea numai a fişierelor cu aceste extensii. - + Embedding Embedding - + Use Nomic Embed API Folosesc Nomic Embed API @@ -1371,7 +1371,7 @@ model to get started Embedding pe documente folosind API de la Nomic în locul unui model local. Necesită repornire. - + Nomic API Key Cheia API Nomic @@ -1382,72 +1382,72 @@ model to get started Cheia API de utilizat cu Nomic Embed. Obţine o cheie prin Atlas: <a href="https://atlas.nomic.ai/cli-login">pagina cheilor API</a> Necesită repornire. - + Embeddings Device Dispozitivul pentru Embeddings - + The compute device used for embeddings. Requires restart. Dispozitivul pentru Embeddings. Necesită repornire. - + Comma-separated list. LocalDocs will only attempt to process files with these extensions. Extensiile, separate prin virgulă. 
LocalDocs va încerca procesarea numai a fişierelor cu aceste extensii. - + Embed documents using the fast Nomic API instead of a private local model. Requires restart. Embedding pe documente folosind API de la Nomic în locul unui model local. Necesită repornire. - + API key to use for Nomic Embed. Get one from the Atlas <a href="https://atlas.nomic.ai/cli-login">API keys page</a>. Requires restart. Cheia API de utilizat cu Nomic Embed. Obţine o cheie prin Atlas: <a href="https://atlas.nomic.ai/cli-login">pagina cheilor API</a> Necesită repornire. - + Application default Implicit - + Display Vizualizare - + Show Sources Afişarea Surselor - + Display the sources used for each response. Afişează Sursele utilizate pentru fiecare replică. - + Advanced Avansate - + Warning: Advanced usage only. Atenţie: Numai pentru utilizare avansată. - + Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. More info <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">here</a>. Valori prea mari pot cauza erori cu LocalDocs, replici foarte lente sau chiar absenţa lor. În mare, numărul {N caractere x N citate} este adăugat la Context Window/Size/Length a modelului. Mai multe informaţii: <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">aici</a>. - + Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation. Numărul caracterelor din fiecare citat. Numere mari amplifică probabilitatea unor replici corecte, dar de asemenea cauzează generare lentă. - + Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation. Numărul maxim al citatelor ce corespund şi care vor fi adăugate la contextul pentru prompt. 
Numere mari amplifică probabilitatea unor replici corecte, dar de asemenea cauzează generare lentă. @@ -1461,7 +1461,7 @@ model to get started Valori prea mari pot cauza erori cu LocalDocs, replici lente sau absenţa lor completă. în mare, numărul {N caractere x N citate} este adăugat la Context Window/Size/Length a modelului. Mai multe informaţii: <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">aici</a>. - + Document snippet size (characters) Lungimea (în caractere) a citatelor din documente @@ -1471,7 +1471,7 @@ model to get started numărul caracterelor din fiecare citat. Numere mari amplifică probabilitatea unor replici corecte, dar de asemenea pot cauza generare lentă. - + Max document snippets per prompt Numărul maxim de citate per prompt @@ -1485,17 +1485,17 @@ model to get started LocalDocsView - + LocalDocs LocalDocs - + Chat with your local files Dialoghează cu fişiere locale - + + Add Collection + Adaugă o Colecţie @@ -1504,102 +1504,102 @@ model to get started EROARE: Baza de date LocalDocs nu e validă. - + <h3>ERROR: The LocalDocs database cannot be accessed or is not valid.</h3><br><i>Note: You will need to restart after trying any of the following suggested fixes.</i><br><ul><li>Make sure that the folder set as <b>Download Path</b> exists on the file system.</li><li>Check ownership as well as read and write permissions of the <b>Download Path</b>.</li><li>If there is a <b>localdocs_v2.db</b> file, check its ownership and read/write permissions, too.</li></ul><br>If the problem persists and there are any 'localdocs_v*.db' files present, as a last resort you can<br>try backing them up and removing them. You will have to recreate your collections, however. EROARE: Baza de date LocalDocs nu poate fi accesată sau nu e validă. 
Programul trebuie repornit după ce se încearcă oricare din următoarele remedii sugerate.</i><br><ul><li>Asigură-te că folderul pentru <b>Download Path</b> există în sistemul de fişiere.</li><li>Verifică permisiunile şi apartenenţa folderului pentru <b>Download Path</b>.</li><li>Dacă există fişierul <b>localdocs_v2.db</b>, verifică-i apartenenţa şi permisiunile citire/scriere (read/write).</li></ul><br>Dacă problema persistă şi există vreun fişier 'localdocs_v*.db', ca ultimă soluţie poţi<br>încerca duplicarea (backup) şi apoi ştergerea lor. Oricum, va trebui să re-creezi Colecţiile. - + No Collections Installed Nu există Colecţii instalate - + Install a collection of local documents to get started using this feature Instalează o Colecţie de documente pentru a putea utiliza funcţionalitatea aceasta - + + Add Doc Collection + Adaugă o Colecţie de documente - + Shows the add model view Afişează secţiunea de adăugare a unui model - + Indexing progressBar Bara de progresie a Indexării - + Shows the progress made in the indexing Afişează progresia Indexării - + ERROR EROARE - + INDEXING ...SE INDEXEAZĂ... - + EMBEDDING ...EMBEDDINGs... - + REQUIRES UPDATE NECESITĂ UPDATE - + READY GATA - + INSTALLING ...INSTALARE... - + Indexing in progress Se Indexează... - + Embedding in progress ...Se calculează Embeddings... - + This collection requires an update after version change Această Colecţie necesită update după schimbarea versiunii - + Automatically reindexes upon changes to the folder Se reindexează automat după schimbări ale folderului - + Installation in progress ...Instalare în curs... - + % % - + %n file(s) %n fişier @@ -1608,7 +1608,7 @@ model to get started - + %n word(s) %n cuvânt @@ -1617,27 +1617,27 @@ model to get started - + Remove Şterg - + Rebuild Reconstrucţie - + Reindex this folder from scratch. This is slow and usually not needed. Reindexează de la zero acest folder. Procesul e lent şi de obicei inutil. 
- + Update Update/Actualizare - + Update the collection to the new version. This is a slow operation. Actualizează Colecţia la noua versiune. Această procedură e lentă. @@ -1661,67 +1661,78 @@ model to get started <strong>Modelul ChatGPT GPT-3.5 Turbo al OpenAI</strong><br> %1 - + + + cannot open "%1": %2 + + + + + cannot create "%1": %2 + + + + %1 (%2) %1 (%2) - + <strong>OpenAI-Compatible API Model</strong><br><ul><li>API Key: %1</li><li>Base URL: %2</li><li>Model Name: %3</li></ul> <strong>Model API compatibil cu OpenAI</strong><br><ul><li>Cheia API: %1</li><li>Base URL: %2</li><li>Numele modelului: %3</li></ul> - + <ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with OpenAI</li><li>You can apply for an API key <a href="https://platform.openai.com/account/api-keys">here.</a></li> <ul><li>Necesită o cheie API OpenAI personală. </li><li>ATENŢIE: Conversaţiile tale vor fi trimise la OpenAI!</li><li>Cheia ta API va fi stocată pe disc (local) </li><li>Va fi utilizată numai pentru comunicarea cu OpenAI</li><li>Poţi solicita o cheie API aici: <a href="https://platform.openai.com/account/api-keys">aici.</a></li> - + <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1 <strong>Modelul OpenAI's ChatGPT GPT-3.5 Turbo</strong><br> %1 - + <br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info. <br><br><i>* Chiar dacă plăteşti la OpenAI pentru ChatGPT-4, aceasta nu garantează accesul la cheia API. Contactează OpenAI pentru mai multe informaţii. 
- + <strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2 <strong>Modelul ChatGPT GPT-4 al OpenAI</strong><br> %1 %2 - + <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> <ul><li>Necesită cheia personală Mistral API. </li><li>ATENŢIE: Conversaţiile tale vor fi trimise la Mistral!</li><li>Cheia ta API va fi stocată pe disc (local)</li><li>Va fi utilizată numai pentru comunicarea cu Mistral</li><li>Poţi solicita o cheie API aici: <a href="https://console.mistral.ai/user/api-keys">aici</a>.</li> - + <strong>Mistral Tiny model</strong><br> %1 <strong>Modelul Mistral Tiny</strong><br> %1 - + <strong>Mistral Small model</strong><br> %1 <strong>Modelul Mistral Small</strong><br> %1 - + <strong>Mistral Medium model</strong><br> %1 <strong>Modelul Mistral Medium</strong><br> %1 - + <ul><li>Requires personal API key and the API base URL.</li><li>WARNING: Will send your chats to the OpenAI-compatible API Server you specified!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with the OpenAI-compatible API Server</li> <ul><li>Necesită cheia personală API si base-URL a API.</li><li>ATENŢIE: Conversaţiile tale vor fi trimise la serverul API compatibil cu OpenAI specificat!</li><li>Cheia ta API va fi stocată pe disc (local)</li><li>Va fi utilizată numai pentru comunicarea cu serverul API compatibil cu OpenAI</li> - + <strong>Connect to OpenAI-compatible API server</strong><br> %1 <strong>Conectare la un server API compatibil cu OpenAI</strong><br> %1 - + <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> <strong>Creat de către %1.</strong><br><ul><li>Publicat 
in: %2.<li>Acest model are %3 Likes.<li>Acest model are %4 download-uri.<li>Mai multe informaţii pot fi găsite la: <a href="https://huggingface.co/%5">aici.</a></ul> @@ -1752,37 +1763,37 @@ model to get started ModelSettings - + Model Model - + Model Settings Configurez modelul - + Clone Clonez - + Remove Şterg - + Name Denumire - + Model File Fişierul modelului - + System Prompt System Prompt @@ -1792,12 +1803,12 @@ model to get started Plasat la Începutul fiecărei conversaţii. Trebuie să conţină token-uri(le) adecvate de Încadrare. - + Prompt Template Prompt Template - + The template that wraps every prompt. Standardul de formulare a fiecărui prompt. @@ -1807,32 +1818,32 @@ model to get started Trebuie să conţină textul "%1" care va fi Înlocuit cu ceea ce scrie utilizatorul. - + Chat Name Prompt Denumirea conversaţiei - + Prompt used to automatically generate chat names. Standardul de formulare a denumirii conversaţiilor. - + Suggested FollowUp Prompt Prompt-ul sugerat pentru a continua - + Prompt used to generate suggested follow-up questions. Prompt-ul folosit pentru generarea întrebărilor de continuare. - + Context Length Lungimea Contextului - + Number of input and output tokens the model sees. Numărul token-urilor de input şi de output văzute de model. @@ -1843,12 +1854,12 @@ model to get started Numărul maxim combinat al token-urilor în prompt+replică înainte de a se pierde informaţie. Utilizarea unui context mai mare decât cel cu care a fost instruit modelul va întoarce rezultate mai slabe. NOTĂ: Nu are efect până la reîncărcarea modelului. - + Temperature Temperatura - + Randomness of model output. Higher -> more variation. Libertatea/Confuzia din replica modelului. Mai mare -> mai multă libertate. @@ -1858,12 +1869,12 @@ model to get started Temperatura creşte probabilitatea de alegere a unor token-uri puţin probabile. NOTĂ: O temperatură tot mai înaltă determină replici tot mai creative şi mai puţin predictibile. 
- + Top-P Top-P - + Nucleus Sampling factor. Lower -> more predictable. Factorul de Nucleus Sampling. Mai mic -> predictibilitate mai mare. @@ -1873,92 +1884,92 @@ model to get started Pot fi alese numai cele mai probabile token-uri a căror probabilitate totală este Top-P. NOTĂ: Se evită selectarea token-urilor foarte improbabile. - + Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens. Plasat la începutul fiecărei conversaţii. Trebuie să conţină token-uri(le) adecvate de încadrare. - + Must contain the string "%1" to be replaced with the user's input. Trebuie să conţină textul "%1" care va fi înlocuit cu ceea ce scrie utilizatorul. - + Maximum combined prompt/response tokens before information is lost. Using more context than the model was trained on will yield poor results. NOTE: Does not take effect until you reload the model. Numărul maxim combinat al token-urilor în prompt+replică înainte de a se pierde informaţie. Utilizarea unui context mai mare decât cel cu care a fost instruit modelul va întoarce rezultate mai slabe. NOTĂ: Nu are efect până la reîncărcarea modelului. - + Temperature increases the chances of choosing less likely tokens. NOTE: Higher temperature gives more creative but less predictable outputs. Temperatura creşte probabilitatea de alegere a unor token-uri puţin probabile. NOTĂ: O temperatură tot mai înaltă determină replici tot mai creative şi mai puţin predictibile. - + Only the most likely tokens up to a total probability of top_p can be chosen. NOTE: Prevents choosing highly unlikely tokens. Pot fi alese numai cele mai probabile token-uri a căror probabilitate totală este Top-P. NOTĂ: Se evită selectarea token-urilor foarte improbabile. - + Min-P Min-P - + Minimum token probability. Higher -> more predictable. Probabilitatea mínimă a unui token. Mai mare -> mai predictibil. - + Sets the minimum relative probability for a token to be considered. 
Stabileşte probabilitatea minimă relativă a unui token de luat în considerare. - + Top-K Top-K - + Size of selection pool for tokens. Dimensiunea setului de token-uri. - + Only the top K most likely tokens will be chosen from. Se va alege numai din cele mai probabile K token-uri. - + Max Length Lungimea maximă - + Maximum response length, in tokens. Lungimea maximă - în token-uri - a replicii. - + Prompt Batch Size Prompt Batch Size - + The batch size used for prompt processing. Dimensiunea setului de token-uri citite simultan din prompt. - + Amount of prompt tokens to process at once. NOTE: Higher values can speed up reading prompts but will use more RAM. Numărul token-urilor procesate simultan. NOTĂ: Valori tot mai mari pot accelera citirea prompt-urilor, dar şi utiliza mai multă RAM. - + How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model. Lower values increase CPU load and RAM usage, and make inference slower. NOTE: Does not take effect until you reload the model. @@ -1970,32 +1981,32 @@ NOTE: Does not take effect until you reload the model. numărul token-urilor procesate simultan. NOTĂ: Valori tot mai mari pot accelera citirea prompt-urilor, dar şi utiliza mai multă RAM. - + Repeat Penalty Penalizarea pentru repetare - + Repetition penalty factor. Set to 1 to disable. Factorul de penalizare a repetării ce se dezactivează cu valoarea 1. - + Repeat Penalty Tokens Token-uri pentru penalizare a repetării - + Number of previous tokens used for penalty. Numărul token-urilor anterioare considerate pentru penalizare. - + GPU Layers Layere în GPU - + Number of model layers to load into VRAM. Numărul layerelor modelului ce vor fi Încărcate în VRAM. @@ -2010,89 +2021,89 @@ NOTE: Does not take effect until you reload the model. 
ModelsView - + No Models Installed Nu există modele instalate - + Install a model to get started using GPT4All Instalează un model pentru a începe să foloseşti GPT4All - - + + + Add Model + Adaugă un model - + Shows the add model view Afişează secţiunea de adăugare a unui model - + Installed Models Modele instalate - + Locally installed chat models Modele conversaţionale instalate local - + Model file Fişierul modelului - + Model file to be downloaded Fişierul modelului ce va fi descărcat - + Description Descriere - + File description Descrierea fişierului - + Cancel Anulare - + Resume Continuare - + Stop/restart/start the download Oprirea/Repornirea/Iniţierea descărcării - + Remove Şterg - + Remove model from filesystem Şterg modelul din sistemul de fişiere - - + + Install Instalează - + Install online model Instalez un model din online @@ -2108,130 +2119,130 @@ NOTE: Does not take effect until you reload the model. <strong><font size="2">ATENţIE: Nerecomandat pentru acest hardware. Modelul necesită mai multă memorie (%1 GB) decât are sistemul tău (%2).</strong></font> - + %1 GB %1 GB - + ? ? - + Describes an error that occurred when downloading Descrie o eroare apărută la download - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#eroare">Error</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">ATENŢIE: Nerecomandat pentru acest hardware. 
Modelul necesită mai multă memorie (%1 GB) decât are sistemul tău(%2).</strong></font> - + Error for incompatible hardware Eroare - hardware incompatibil - + Download progressBar Bara de progresie a descărcării - + Shows the progress made in the download Afişează progresia descărcării - + Download speed Viteza de download - + Download speed in bytes/kilobytes/megabytes per second Viteza de download în bytes/kilobytes/megabytes pe secundă - + Calculating... ...Se calculează... - - - - + + + + Whether the file hash is being calculated Dacă se va calcula hash-ul fişierului - + Busy indicator Indicator de activitate - + Displayed when the file hash is being calculated Afişat când se calculează hash-ul unui fişier - + ERROR: $API_KEY is empty. EROARE: $API_KEY absentă. - + enter $API_KEY introdu cheia $API_KEY - + ERROR: $BASE_URL is empty. EROARE: $BASE_URL absentă. - + enter $BASE_URL introdu $BASE_URL - + ERROR: $MODEL_NAME is empty. EROARE: $MODEL_NAME absent. - + enter $MODEL_NAME introdu $MODEL_NAME - + File size Dimensiunea fişierului - + RAM required RAM necesară - + Parameters Parametri - + Quant Quant(ificare) - + Type Tip @@ -2239,12 +2250,12 @@ NOTE: Does not take effect until you reload the model. MyFancyLink - + Fancy link Link haios - + A stylized link Un link cu stil @@ -2252,7 +2263,7 @@ NOTE: Does not take effect until you reload the model. MySettingsStack - + Please choose a directory Selectează un director (folder) @@ -2260,12 +2271,12 @@ NOTE: Does not take effect until you reload the model. MySettingsTab - + Restore Defaults Restaurez valorile implicite - + Restores settings dialog to a default state Restaurez secţiunea de configurare la starea sa implicită @@ -2273,7 +2284,7 @@ NOTE: Does not take effect until you reload the model. NetworkDialog - + Contribute data to the GPT4All Opensource Datalake. Contribuie cu date/informaţii la componenta Open-source DataLake a GPT4All. 
@@ -2311,7 +2322,7 @@ NOTE: Does not take effect until you reload the model. participant contribuitor la orice lansare a unui model GPT4All care foloseşte datele tale! - + By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake. @@ -2334,47 +2345,47 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O participant contribuitor la orice lansare a unui model GPT4All care foloseşte datele tale! - + Terms for opt-in Termenii pentru participare - + Describes what will happen when you opt-in Descrie ce se întâmplă când participi - + Please provide a name for attribution (optional) Specifică o denumire pentru această apreciere (opţional) - + Attribution (optional) Apreciere (opţional) - + Provide attribution Apreciază - + Enable Activează - + Enable opt-in Activează participarea - + Cancel Anulare - + Cancel opt-in Anulează participarea @@ -2382,17 +2393,17 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O NewVersionDialog - + New version is available O nouă versiune disponibilă! 
- + Update Update/Actualizare - + Update to new version Actualizează la noua versiune @@ -2400,17 +2411,17 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O PopupDialog - + Reveals a shortlived help balloon Afişează un mesaj scurt de asistenţă - + Busy indicator Indicator de activitate - + Displayed when the popup is showing busy Se afişează când procedura este în desfăşurare @@ -2418,28 +2429,28 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O SettingsView - - + + Settings Configurare - + Contains various application settings Conţine setări ale programului - + Application Program - + Model Model - + LocalDocs LocalDocs @@ -2447,7 +2458,7 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O StartupDialog - + Welcome! Bun venit! @@ -2460,12 +2471,12 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O %2 - + Release notes Despre versiune - + Release notes for this version Despre această versiune @@ -2517,7 +2528,7 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O care foloseşte datele tale! - + ### Release notes %1### Contributors %2 @@ -2526,7 +2537,7 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O %2 - + ### Opt-ins for anonymous usage analytics and datalake By enabling these features, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. @@ -2562,71 +2573,71 @@ participant contribuitor la orice lansare a unui model GPT4All care foloseşte datele tale! 
- + Terms for opt-in Termenii pentru participare - + Describes what will happen when you opt-in Descrie ce se întâmplă când participi - - + + Opt-in for anonymous usage statistics Acceptă colectarea de statistici despre utilizare -anonimă- - - + + Yes Da - + Allow opt-in for anonymous usage statistics Acceptă participarea la colectarea de statistici despre utilizare -anonimă- - - + + No Nu - + Opt-out for anonymous usage statistics Anulează participarea la colectarea de statistici despre utilizare -anonimă- - + Allow opt-out for anonymous usage statistics Permite anularea participării la colectarea de statistici despre utilizare -anonimă- - - + + Opt-in for network Acceptă pentru reţea - + Allow opt-in for network Permite participarea pentru reţea - + Allow opt-in anonymous sharing of chats to the GPT4All Datalake Permite participarea la partajarea (share) -anonimă- a conversaţiilor către DataLake a GPT4All - + Opt-out for network Refuz participarea, pentru reţea - + Allow opt-out anonymous sharing of chats to the GPT4All Datalake Permite anularea participării la partajarea -anonimă- a conversaţiilor către DataLake a GPT4All @@ -2639,23 +2650,23 @@ care foloseşte datele tale! <b>Atenţie:</b> schimbarea modelului va şterge conversaţia curentă. Confirmi aceasta? - + <b>Warning:</b> changing the model will erase the current conversation. Do you wish to continue? <b>Atenţie:</b> schimbarea modelului va şterge conversaţia curentă. Confirmi aceasta? - + Continue Continuă - + Continue with model loading Continuă încărcarea modelului - - + + Cancel Anulare @@ -2663,32 +2674,32 @@ care foloseşte datele tale! ThumbsDownDialog - + Please edit the text below to provide a better response. (optional) Te rog, editează textul de mai jos pentru a oferi o replică mai bună (opţional). - + Please provide a better response... Te rog, oferă o replică mai bună... 
- + Submit Trimite - + Submits the user's response Trimite răspunsul dat de utilizator - + Cancel Anulare - + Closes the response dialog Închide afişarea răspunsului @@ -2709,7 +2720,7 @@ care foloseşte datele tale! <h3>A apărut o eroare la iniţializare:; </h3><br><i>"Hardware incompatibil. "</i><br><br>Din păcate, procesorul (CPU) nu întruneşte condiţiile minime pentru a rula acest program. În particular, nu suportă instrucţiunile AVX pe care programul le necesită pentru a integra un model conversaţional modern. În acest moment, unica soluţie este să îţi aduci la zi sistemul hardware cu un CPU mai recent.<br><br>Aici sunt mai multe informaţii: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> - + GPT4All v%1 GPT4All v%1 @@ -2728,120 +2739,120 @@ care foloseşte datele tale! <h3>A apărut o eroare la iniţializare:; </h3><br><i>"Hardware incompatibil. "</i><br><br>Din păcate, procesorul (CPU) nu întruneşte condiţiile minime pentru a rula acest program. În particular, nu suportă instrucţiunile AVX pe care programul le necesită pentru a integra un model conversaţional modern. În acest moment, unica soluţie este să îţi aduci la zi sistemul hardware cu un CPU mai recent.<br><br>Aici sunt mai multe informaţii: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> - + <h3>Encountered an error starting up:</h3><br><i>"Incompatible hardware detected."</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. 
The only solution at this time is to upgrade your hardware to a more modern CPU.<br><br>See here for more information: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> <h3>A apărut o eroare la iniţializare:; </h3><br><i>"Hardware incompatibil. "</i><br><br>Din păcate, procesorul (CPU) nu întruneşte condiţiile minime pentru a rula acest program. În particular, nu suportă instrucţiunile AVX pe care programul le necesită pentru a integra un model conversaţional modern. În acest moment, unica soluţie este să îţi aduci la zi sistemul hardware cu un CPU mai recent.<br><br>Aici sunt mai multe informaţii: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> - + <h3>Encountered an error starting up:</h3><br><i>"Inability to access settings file."</i><br><br>Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help. <h3>A apărut o eroare la iniţializare:; </h3><br><i>"Nu poate fi accesat fişierul de configurare a programului."</i><br><br>Din păcate, ceva împiedică programul în a accesa acel fişier. Cauza poate fi un set de permisiuni incorecte pe directorul/folderul local de configurare unde se află acel fişier. Poţi parcurge canalul nostru <a href="https://discord.gg/4M2QFmTt2k">Discord</a> unde vei putea primi asistenţă. - + Connection to datalake failed. Conectarea la DataLake a eşuat. - + Saving chats. Se salvează conversaţiile. 
- + Network dialog Dialogul despre reţea - + opt-in to share feedback/conversations acceptă partajarea (share) de comentarii/conversaţii - + Home view Secţiunea de Început - + Home view of application Secţiunea de Început a programului - + Home Prima<br>pagină - + Chat view Secţiunea conversaţiilor - + Chat view to interact with models Secţiunea de chat pentru interacţiune cu modele - + Chats Conversaţii - - + + Models Modele - + Models view for installed models Secţiunea modelelor instalate - - + + LocalDocs LocalDocs - + LocalDocs view to configure and use local docs Secţiunea LocalDocs de configurare şi folosire a Documentelor Locale - - + + Settings Configurare - + Settings view for application configuration Secţiunea de configurare a programului - + The datalake is enabled DataLake: ACTIV - + Using a network model Se foloseşte un model pe reţea - + Server mode is enabled Modul Server: ACTIV - + Installed models Modele instalate - + View of installed models Secţiunea modelelor instalate diff --git a/gpt4all-chat/translations/gpt4all_zh_CN.ts b/gpt4all-chat/translations/gpt4all_zh_CN.ts index 4bd6c3950891..5ee7e9b4f4fd 100644 --- a/gpt4all-chat/translations/gpt4all_zh_CN.ts +++ b/gpt4all-chat/translations/gpt4all_zh_CN.ts @@ -4,62 +4,62 @@ AddCollectionView - + ← Existing Collections ← 存在集合 - + Add Document Collection 添加文档集合 - + Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings. 添加一个包含纯文本文件、PDF或Markdown的文件夹。在“设置”中配置其他扩展。 - + Please choose a directory 请选择一个目录 - + Name 名称 - + Collection name... 集合名称 - + Name of the collection to add (Required) 集合名称 (必须) - + Folder 目录 - + Folder path... 目录地址 - + Folder path to documents (Required) 文档的目录地址(必须) - + Browse 查看 - + Create Collection 创建集合 @@ -67,22 +67,22 @@ AddModelView - + ← Existing Models ← 存在的模型 - + Explore Models 发现模型 - + Discover and download models by keyword search... 通过关键词查找并下载模型 ... 
- + Text field for discovering and filtering downloadable models 用于发现和筛选可下载模型的文本字段 @@ -91,32 +91,32 @@ 搜索中 - + Initiate model discovery and filtering 启动模型发现和过滤 - + Triggers discovery and filtering of models 触发模型的发现和筛选 - + Default 默认 - + Likes 喜欢 - + Downloads 下载 - + Recent 近期 @@ -125,12 +125,12 @@ 排序: - + Asc 升序 - + Desc 倒序 @@ -139,7 +139,7 @@ 排序目录: - + None @@ -152,145 +152,145 @@ 网络问题:无法访问 http://gpt4all.io/models/models3.json - + Searching · %1 搜索中 · %1 - + Sort by: %1 排序: %1 - + Sort dir: %1 排序目录: %1 - + Limit: %1 数量: %1 - + Network error: could not retrieve %1 网络错误:无法检索 %1 - - + + Busy indicator 繁忙程度 - + Displayed when the models request is ongoing 在模型请求进行中时显示 - + Model file 模型文件 - + Model file to be downloaded 待下载模型 - + Description 描述 - + File description 文件描述 - + Cancel 取消 - + Resume 继续 - + Download 下载 - + Stop/restart/start the download 停止/重启/开始下载 - + Remove 删除 - + Remove model from filesystem 从系统中删除模型 - - + + Install 安装 - + Install online model 安装在线模型 - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">错误</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">警告: 你的设备硬件不推荐 ,模型需要的内存 (%1 GB)比你的系统还要多 (%2).</strong></font> - + ERROR: $API_KEY is empty. 错误:$API_KEY 为空 - + ERROR: $BASE_URL is empty. 错误:$BASE_URL 为空 - + enter $BASE_URL 输入 $BASE_URL - + ERROR: $MODEL_NAME is empty. 错误:$MODEL_NAME为空 - + enter $MODEL_NAME 输入:$MODEL_NAME - + %1 GB %1 GB - - + + ? @@ -299,7 +299,7 @@ <a href="#error">错误</a> - + Describes an error that occurred when downloading 描述下载过程中发生的错误 @@ -316,60 +316,60 @@ 你的系统需要 ( - + Error for incompatible hardware 硬件不兼容的错误 - + Download progressBar 下载进度 - + Shows the progress made in the download 显示下载进度 - + Download speed 下载速度 - + Download speed in bytes/kilobytes/megabytes per second 下载速度 b/kb/mb /s - + Calculating... 
计算中 - - - - + + + + Whether the file hash is being calculated 是否正在计算文件哈希 - + Displayed when the file hash is being calculated 在计算文件哈希时显示 - + enter $API_KEY 输入$API_KEY - + File size 文件大小 - + RAM required RAM 需要 @@ -378,17 +378,17 @@ GB - + Parameters 参数 - + Quant 量化 - + Type 类型 @@ -396,22 +396,22 @@ ApplicationSettings - + Application 应用 - + Network dialog 网络对话 - + opt-in to share feedback/conversations 选择加入以共享反馈/对话 - + ERROR: Update system could not find the MaintenanceTool used<br> to check for updates!<br><br> Did you install this application using the online installer? If so,<br> @@ -425,87 +425,87 @@ 如果无法手动启动它,那么恐怕您需要重新安装。 - + Error dialog 错误对话 - + Application Settings 应用设置 - + General 通用设置 - + Theme 主题 - + The application color scheme. 应用的主题颜色 - + Dark 深色 - + Light 亮色 - + LegacyDark LegacyDark - + Font Size 字体大小 - + The size of text in the application. 应用中的文本大小。 - + Small - + Medium - + Large - + Language and Locale 语言和本地化 - + The language and locale you wish to use. 你想使用的语言 - + System Locale 系统语言 - + Device 设备 @@ -514,138 +514,138 @@ 用于文本生成的计算设备. "自动" 使用 Vulkan or Metal. - + The compute device used for text generation. 设备用于文本生成 - - + + Application default 程序默认 - + Default Model 默认模型 - + The preferred model for new chats. Also used as the local server fallback. 新聊天的首选模式。也用作本地服务器回退。 - + Suggestion Mode 建议模式 - + Generate suggested follow-up questions at the end of responses. 在答复结束时生成建议的后续问题。 - + When chatting with LocalDocs 本地文档检索 - + Whenever possible 只要有可能 - + Never 从不 - + Download Path 下载目录 - + Where to store local models and the LocalDocs database. 本地模型和本地文档数据库存储目录 - + Browse 查看 - + Choose where to save model files 模型下载目录 - + Enable Datalake 开启数据湖 - + Send chats and feedback to the GPT4All Open-Source Datalake. 发送对话和反馈给GPT4All 的开源数据湖。 - + Advanced 高级 - + CPU Threads CPU线程 - + The number of CPU threads used for inference and embedding. 用于推理和嵌入的CPU线程数 - + Save Chat Context 保存对话上下文 - + Save the chat model's state to disk for faster loading. 
WARNING: Uses ~2GB per chat. 保存模型's 状态以提供更快加载速度. 警告: 需用 ~2GB 每个对话. - + Enable Local Server 开启本地服务 - + Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage. 将OpenAI兼容服务器暴露给本地主机。警告:导致资源使用量增加。 - + API Server Port API 服务端口 - + The port to use for the local server. Requires restart. 使用本地服务的端口,需要重启 - + Check For Updates 检查更新 - + Manually check for an update to GPT4All. 手动检查更新 - + Updates 更新 @@ -653,13 +653,13 @@ Chat - - + + New Chat 新对话 - + Server Chat 服务器对话 @@ -675,12 +675,12 @@ ChatAPIWorker - + ERROR: Network error occurred while connecting to the API server 错误:连接到 API 服务器时发生网络错误 - + ChatAPIWorker::handleFinished got HTTP Error %1 %2 ChatAPIWorker::handleFinished 收到 HTTP 错误 %1 %2 @@ -688,62 +688,62 @@ ChatDrawer - + Drawer 抽屉 - + Main navigation drawer 导航 - + + New Chat + 新对话 - + Create a new chat 新对话 - + Select the current chat or edit the chat when in edit mode 选择当前的聊天或在编辑模式下编辑聊天 - + Edit chat name 修改对话名称 - + Save chat name 保存对话名称 - + Delete chat 删除对话 - + Confirm chat deletion 确认删除对话 - + Cancel chat deletion 取消删除对话 - + List of chats 对话列表 - + List of chats in the drawer dialog 对话框中的聊天列表 @@ -751,32 +751,32 @@ ChatListModel - + TODAY 今天 - + THIS WEEK 本周 - + THIS MONTH 本月 - + LAST SIX MONTHS 半年内 - + THIS YEAR 今年内 - + LAST YEAR 去年 @@ -792,27 +792,27 @@ lt;br><br>模型加载失败可能由多种原因造成,但最常见的原因包括文件格式错误、下载不完整或损坏、文件类型错误、系统 RAM 不足或模型类型不兼容。以下是解决该问题的一些建议:<br><ul><li>确保模型文件具有兼容的格式和类型<li>检查下载文件夹中的模型文件是否完整<li>您可以在设置对话框中找到下载文件夹<li>如果您已侧载模型,请通过检查 md5sum 确保文件未损坏<li>在我们的<a href="https://docs.gpt4all.io/">文档</a>中了解有关支持哪些模型的更多信息对于 gui<li>查看我们的<a href="https://discord.gg/4M2QFmTt2k">discord 频道</a> 获取帮助 - + <h3>Warning</h3><p>%1</p> <h3>警告</h3><p>%1</p> - + Switch model dialog 切换模型对话 - + Warn the user if they switch models, then context will be erased 如果用户切换模型,则警告用户,然后上下文将被删除 - + Conversation copied to clipboard. 复制对话到剪切板 - + Code copied to clipboard. 
复制代码到剪切板 @@ -821,52 +821,52 @@ 响应: - + Chat panel 对话面板 - + Chat panel with options 对话面板选项 - + Reload the currently loaded model 重载当前模型 - + Eject the currently loaded model 弹出当前加载的模型 - + No model installed. 没有安装模型 - + Model loading error. 模型加载错误 - + Waiting for model... 稍等片刻 - + Switching context... 切换上下文 - + Choose a model... 选择模型 - + Not found: %1 没找到: %1 @@ -879,23 +879,23 @@ 载入中· - + The top item is the current model 当前模型的最佳选项 - - + + LocalDocs 本地文档 - + Add documents 添加文档 - + add collections of documents to the chat 将文档集合添加到聊天中 @@ -908,53 +908,53 @@ (默认) → - + Load the default model 载入默认模型 - + Loads the default model which can be changed in settings 加载默认模型,可以在设置中更改 - + No Model Installed 没有下载模型 - + GPT4All requires that you install at least one model to get started GPT4All要求您至少安装一个模型才能开始 - + Install a Model 下载模型 - + Shows the add model view 查看添加的模型 - + Conversation with the model 使用此模型对话 - + prompt / response pairs from the conversation 对话中的提示/响应对 - + GPT4All GPT4All - + You @@ -971,7 +971,7 @@ model to get started 重新生成上下文... - + response stopped ... 响应停止... @@ -984,176 +984,176 @@ model to get started 检索本地文档: - + processing ... 处理中 - + generating response ... 响应中... - + generating questions ... 生成响应 - - + + Copy 复制 - + Copy Message 复制内容 - + Disable markdown 不允许markdown - + Enable markdown 允许markdown - + Thumbs up 点赞 - + Gives a thumbs up to the response 点赞响应 - + Thumbs down 点踩 - + Opens thumbs down dialog 打开点踩对话框 - + Suggested follow-ups 建议的后续行动 - + Erase and reset chat session 擦除并重置聊天会话 - + Copy chat session to clipboard 复制对话到剪切板 - + Redo last chat response 重新生成上个响应 - + Stop generating 停止生成 - + Stop the current response generation 停止当前响应 - + Reloads the model 重载模型 - + <h3>Encountered an error loading model:</h3><br><i>"%1"</i><br><br>Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:<br><ul><li>Ensure the model file has a compatible format and type<li>Check the model file is complete in the download folder<li>You can find the download folder in the settings dialog<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum<li>Read more about what models are supported in our <a href="https://docs.gpt4all.io/">documentation</a> for the gui<li>Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help <h3>加载模型时遇到错误:</h3><br><i><%1></i><br><br>模型加载失败可能由多种原因引起,但最常见的原因包括文件格式错误、下载不完整或损坏、文件类型错误、系统 RAM 不足或模型类型不兼容。以下是一些解决问题的建议:<br><ul><li>确保模型文件具有兼容的格式和类型<li>检查下载文件夹中的模型文件是否完整<li>您可以在设置对话框中找到下载文件夹<li>如果您已侧载模型,请通过检查 md5sum 确保文件未损坏<li>在我们的 <a href="https://docs.gpt4all.io/">文档</a> 中了解有关 gui 支持哪些模型的更多信息<li>查看我们的 <a href="https://discord.gg/4M2QFmTt2k">discord 频道</a> 以获取帮助 - - + + Reload · %1 重载 · %1 - + Loading · %1 载入中 · %1 - + Load · %1 (default) → 载入 · %1 (默认) → - + restoring from text ... 从文本恢复中 - + retrieving localdocs: %1 ... 检索本地文档: %1 ... - + searching localdocs: %1 ... 搜索本地文档: %1 ... - + %n Source(s) %n 资源 - + Send a message... 发送消息... - + Load a model to continue... 选择模型并继续 - + Send messages/prompts to the model 发送消息/提示词给模型 - + Cut 剪切 - + Paste 粘贴 - + Select All 全选 - + Send message 发送消息 - + Sends the message/prompt contained in textfield to the model 将文本框中包含的消息/提示发送给模型 @@ -1161,36 +1161,36 @@ model to get started CollectionsDrawer - + Warning: searching collections while indexing can return incomplete results 提示: 索引时搜索集合可能会返回不完整的结果 - + %n file(s) - + %n word(s) - + Updating 更新中 - + + Add Docs + 添加文档 - + Select a collection to make it available to the chat model. 选择一个集合,使其可用于聊天模型。 @@ -1198,37 +1198,37 @@ model to get started Download - + Model "%1" is installed successfully. 模型 "%1" 安装成功 - + ERROR: $MODEL_NAME is empty. 错误:$MODEL_NAME 为空 - + ERROR: $API_KEY is empty. 错误:$API_KEY为空 - + ERROR: $BASE_URL is invalid. 
错误:$BASE_URL 非法 - + ERROR: Model "%1 (%2)" is conflict. 错误: 模型 "%1 (%2)" 有冲突. - + Model "%1 (%2)" is installed successfully. 模型 "%1 (%2)" 安装成功. - + Model "%1" is removed. 模型 "%1" 已删除. @@ -1236,92 +1236,92 @@ model to get started HomeView - + Welcome to GPT4All 欢迎 - + The privacy-first LLM chat application 隐私至上的大模型咨询应用程序 - + Start chatting 开始聊天 - + Start Chatting 开始聊天 - + Chat with any LLM 大预言模型聊天 - + LocalDocs 本地文档 - + Chat with your local files 本地文件聊天 - + Find Models 查找模型 - + Explore and download models 发现并下载模型 - + Latest news 新闻 - + Latest news from GPT4All GPT4All新闻 - + Release Notes 发布日志 - + Documentation 文档 - + Discord Discord - + X (Twitter) X (Twitter) - + Github Github - + nomic.ai nomic.ai - + Subscribe to Newsletter 订阅信息 @@ -1329,117 +1329,117 @@ model to get started LocalDocsSettings - + LocalDocs 本地文档 - + LocalDocs Settings 本地文档设置 - + Indexing 索引中 - + Allowed File Extensions 添加文档扩展名 - + Comma-separated list. LocalDocs will only attempt to process files with these extensions. 逗号分隔的列表。LocalDocs 只会尝试处理具有这些扩展名的文件 - + Embedding Embedding - + Use Nomic Embed API 使用 Nomic 内部 API - + Embed documents using the fast Nomic API instead of a private local model. Requires restart. 使用快速的 Nomic API 嵌入文档,而不是使用私有本地模型 - + Nomic API Key Nomic API Key - + API key to use for Nomic Embed. Get one from the Atlas <a href="https://atlas.nomic.ai/cli-login">API keys page</a>. Requires restart. Nomic Embed 使用的 API 密钥。请访问官网获取,需要重启。 - + Embeddings Device Embeddings 设备 - + The compute device used for embeddings. Requires restart. 技术设备用于embeddings. 需要重启. - + Application default 程序默认 - + Display 显示 - + Show Sources 查看源码 - + Display the sources used for each response. 显示每个响应所使用的源。 - + Advanced 高级 - + Warning: Advanced usage only. 提示: 仅限高级使用。 - + Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. 
More info <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">here</a>. 值过大可能会导致 localdocs 失败、响应速度极慢或根本无法响应。粗略地说,{N 个字符 x N 个片段} 被添加到模型的上下文窗口中。更多信息请见<a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">此处</a>。 - + Document snippet size (characters) 文档粘贴大小 (字符) - + Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation. 每个文档片段的字符数。较大的数值增加了事实性响应的可能性,但也会导致生成速度变慢。 - + Max document snippets per prompt 每个提示的最大文档片段数 - + Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation. 检索到的文档片段最多添加到提示上下文中的前 N 个最佳匹配项。较大的数值增加了事实性响应的可能性,但也会导致生成速度变慢。 @@ -1447,17 +1447,17 @@ model to get started LocalDocsView - + LocalDocs 本地文档 - + Chat with your local files 和本地文件对话 - + + Add Collection + 添加集合 @@ -1472,136 +1472,136 @@ model to get started <h3>错误:无法访问 LocalDocs 数据库或该数据库无效。</h3><br><i>注意:尝试以下任何建议的修复方法后,您将需要重新启动。</i><br><ul><li>确保设置为<b>下载路径</b>的文件夹存在于文件系统中。</li><li>检查<b>下载路径</b>的所有权以及读写权限。</li><li>如果有<b>localdocs_v2.db</b>文件,请检查其所有权和读/写权限。</li></ul><br>如果问题仍然存在,并且存在任何“localdocs_v*.db”文件,作为最后的手段,您可以<br>尝试备份并删除它们。但是,您必须重新创建您的收藏。 - + <h3>ERROR: The LocalDocs database cannot be accessed or is not valid.</h3><br><i>Note: You will need to restart after trying any of the following suggested fixes.</i><br><ul><li>Make sure that the folder set as <b>Download Path</b> exists on the file system.</li><li>Check ownership as well as read and write permissions of the <b>Download Path</b>.</li><li>If there is a <b>localdocs_v2.db</b> file, check its ownership and read/write permissions, too.</li></ul><br>If the problem persists and there are any 'localdocs_v*.db' files present, as a last resort you can<br>try backing them up and removing them. You will have to recreate your collections, however. 
<h3>错误:无法访问 LocalDocs 数据库或该数据库无效。</h3><br><i>注意:尝试以下任何建议的修复方法后,您将需要重新启动。</i><br><ul><li>确保设置为<b>下载路径</b>的文件夹存在于文件系统中。</li><li>检查<b>下载路径</b>的所有权以及读写权限。</li><li>如果有<b>localdocs_v2.db</b>文件,请检查其所有权和读/写权限。</li></ul><br>如果问题仍然存在,并且存在任何“localdocs_v*.db”文件,作为最后的手段,您可以<br>尝试备份并删除它们。但是,您必须重新创建您的收藏。 - + No Collections Installed 没有集合 - + Install a collection of local documents to get started using this feature 安装一组本地文档以开始使用此功能 - + + Add Doc Collection + 添加文档集合 - + Shows the add model view 查看添加的模型 - + Indexing progressBar 索引进度 - + Shows the progress made in the indexing 显示索引进度 - + ERROR 错误 - + INDEXING 索引 - + EMBEDDING EMBEDDING - + REQUIRES UPDATE 需更新 - + READY 准备 - + INSTALLING 安装中 - + Indexing in progress 构建索引中 - + Embedding in progress Embedding进度 - + This collection requires an update after version change 此集合需要在版本更改后进行更新 - + Automatically reindexes upon changes to the folder 在文件夹变动时自动重新索引 - + Installation in progress 安装进度 - + % % - + %n file(s) %n 文件 - + %n word(s) %n 词 - + Remove 删除 - + Rebuild 重新构建 - + Reindex this folder from scratch. This is slow and usually not needed. 从头开始重新索引此文件夹。这个过程较慢,通常情况下不需要。 - + Update 更新 - + Update the collection to the new version. This is a slow operation. 
将集合更新为新版本。这是一个缓慢的操作。 @@ -1609,52 +1609,63 @@ model to get started ModelList - + + + cannot open "%1": %2 + + + + + cannot create "%1": %2 + + + + %1 (%2) %1 (%2) - + <strong>OpenAI-Compatible API Model</strong><br><ul><li>API Key: %1</li><li>Base URL: %2</li><li>Model Name: %3</li></ul> <strong>与 OpenAI 兼容的 API 模型</strong><br><ul><li>API 密钥:%1</li><li>基本 URL:%2</li><li>模型名称:%3</li></ul> - + <ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with OpenAI</li><li>You can apply for an API key <a href="https://platform.openai.com/account/api-keys">here.</a></li> <ul><li>需要个人 OpenAI API 密钥。</li><li>警告:将把您的聊天内容发送给 OpenAI!</li><li>您的 API 密钥将存储在磁盘上</li><li>仅用于与 OpenAI 通信</li><li>您可以在此处<a href="https://platform.openai.com/account/api-keys">申请 API 密钥。</a></li> - + <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1 <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1 - + <strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2 <strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2 - + <strong>Mistral Tiny model</strong><br> %1 <strong>Mistral Tiny model</strong><br> %1 - + <strong>Mistral Small model</strong><br> %1 <strong>Mistral Small model</strong><br> %1 - + <strong>Mistral Medium model</strong><br> %1 <strong>Mistral Medium model</strong><br> %1 - + <ul><li>Requires personal API key and the API base URL.</li><li>WARNING: Will send your chats to the OpenAI-compatible API Server you specified!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with the OpenAI-compatible API Server</li> <ul><li>需要个人 API 密钥和 API 基本 URL。</li><li>警告:将把您的聊天内容发送到您指定的与 OpenAI 兼容的 API 服务器!</li><li>您的 API 密钥将存储在磁盘上</li><li>仅用于与与 OpenAI 兼容的 API 服务器通信</li> - + <strong>Connect to OpenAI-compatible API server</strong><br> %1 <strong>连接到与 OpenAI 兼容的 API 服务器</strong><br> %1 @@ -1663,7 +1674,7 @@ model to get started 
<strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> - + <br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info. <br><br><i>* 即使您为ChatGPT-4向OpenAI付款,这也不能保证API密钥访问。联系OpenAI获取更多信息。 @@ -1672,7 +1683,7 @@ model to get started <strong>OpenAI's ChatGPT model GPT-4</strong><br> - + <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> @@ -1689,7 +1700,7 @@ model to get started <strong>Mistral Medium model</strong><br> - + <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> @@ -1697,57 +1708,57 @@ model to get started ModelSettings - + Model 模型 - + Model Settings 模型设置 - + Clone 克隆 - + Remove 删除 - + Name 名称 - + Model File 模型文件 - + System Prompt 系统提示词 - + Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens. 每次对话开始时的前缀 - + Prompt Template 提示词模版 - + The template that wraps every prompt. 包装每个提示的模板 - + Must contain the string "%1" to be replaced with the user's input. 必须包含字符串 "%1" 替换为用户的's 输入. 
@@ -1757,37 +1768,37 @@ optional image 添加可选图片 - + Chat Name Prompt 聊天名称提示 - + Prompt used to automatically generate chat names. 用于自动生成聊天名称的提示。 - + Suggested FollowUp Prompt 建议的后续提示 - + Prompt used to generate suggested follow-up questions. 用于生成建议的后续问题的提示。 - + Context Length 上下文长度 - + Number of input and output tokens the model sees. 模型看到的输入和输出令牌的数量。 - + Maximum combined prompt/response tokens before information is lost. Using more context than the model was trained on will yield poor results. NOTE: Does not take effect until you reload the model. @@ -1796,128 +1807,128 @@ NOTE: Does not take effect until you reload the model. 注意:在重新加载模型之前不会生效。 - + Temperature 温度 - + Randomness of model output. Higher -> more variation. 模型输出的随机性。更高->更多的变化。 - + Temperature increases the chances of choosing less likely tokens. NOTE: Higher temperature gives more creative but less predictable outputs. 温度增加了选择不太可能的token的机会。 注:温度越高,输出越有创意,但预测性越低。 - + Top-P Top-P - + Nucleus Sampling factor. Lower -> more predictable. 核子取样系数。较低->更具可预测性。 - + Only the most likely tokens up to a total probability of top_p can be chosen. NOTE: Prevents choosing highly unlikely tokens. 只能选择总概率高达top_p的最有可能的令牌。 注意:防止选择极不可能的token。 - + Min-P Min-P - + Minimum token probability. Higher -> more predictable. 最小令牌概率。更高 -> 更可预测。 - + Sets the minimum relative probability for a token to be considered. 设置被考虑的标记的最小相对概率。 - + Top-K Top-K - + Size of selection pool for tokens. 令牌选择池的大小。 - + Only the top K most likely tokens will be chosen from. 仅从最可能的前 K 个标记中选择 - + Max Length 最大长度 - + Maximum response length, in tokens. 最大响应长度(以令牌为单位) - + Prompt Batch Size 提示词大小 - + The batch size used for prompt processing. 用于快速处理的批量大小。 - + Amount of prompt tokens to process at once. NOTE: Higher values can speed up reading prompts but will use more RAM. 一次要处理的提示令牌数量。 注意:较高的值可以加快读取提示,但会使用更多的RAM。 - + Repeat Penalty 重复惩罚 - + Repetition penalty factor. Set to 1 to disable. 
重复处罚系数。设置为1可禁用。 - + Repeat Penalty Tokens 重复惩罚数 - + Number of previous tokens used for penalty. 用于惩罚的先前令牌数量。 - + GPU Layers GPU 层 - + Number of model layers to load into VRAM. 要加载到VRAM中的模型层数。 - + How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model. Lower values increase CPU load and RAM usage, and make inference slower. NOTE: Does not take effect until you reload the model. @@ -1929,134 +1940,134 @@ NOTE: Does not take effect until you reload the model. ModelsView - + No Models Installed 无模型 - + Install a model to get started using GPT4All 安装模型并开始使用 - - + + + Add Model + 添加模型 - + Shows the add model view 查看增加到模型 - + Installed Models 已安装的模型 - + Locally installed chat models 本地安装的聊天 - + Model file 模型文件 - + Model file to be downloaded 待下载的模型 - + Description 描述 - + File description 文件描述 - + Cancel 取消 - + Resume 继续 - + Stop/restart/start the download 停止/重启/开始下载 - + Remove 删除 - + Remove model from filesystem 从系统中删除模型 - - + + Install 按照 - + Install online model 安装在线模型 - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">Error</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> - + ERROR: $API_KEY is empty. 错误:$API_KEY 为空 - + ERROR: $BASE_URL is empty. 错误:$BASE_URL 为空 - + enter $BASE_URL 输入 $BASE_URL - + ERROR: $MODEL_NAME is empty. 错误:$MODEL_NAME为空 - + enter $MODEL_NAME 输入:$MODEL_NAME - + %1 GB %1 GB - + ? @@ -2065,7 +2076,7 @@ NOTE: Does not take effect until you reload the model. <a href="#错误">错误</a> - + Describes an error that occurred when downloading 描述下载时发生的错误 @@ -2082,65 +2093,65 @@ NOTE: Does not take effect until you reload the model. 
GB) 你的系统需要 ( - + Error for incompatible hardware 硬件不兼容的错误 - + Download progressBar 下载进度 - + Shows the progress made in the download 显示下载进度 - + Download speed 下载速度 - + Download speed in bytes/kilobytes/megabytes per second 下载速度 b/kb/mb /s - + Calculating... 计算中... - - - - + + + + Whether the file hash is being calculated 是否正在计算文件哈希 - + Busy indicator 繁忙程度 - + Displayed when the file hash is being calculated 在计算文件哈希时显示 - + enter $API_KEY 输入 $API_KEY - + File size 文件大小 - + RAM required 需要 RAM @@ -2149,17 +2160,17 @@ NOTE: Does not take effect until you reload the model. GB - + Parameters 参数 - + Quant 量化 - + Type 类型 @@ -2167,12 +2178,12 @@ NOTE: Does not take effect until you reload the model. MyFancyLink - + Fancy link 精选链接 - + A stylized link 样式化链接 @@ -2180,7 +2191,7 @@ NOTE: Does not take effect until you reload the model. MySettingsStack - + Please choose a directory 请选择目录 @@ -2188,12 +2199,12 @@ NOTE: Does not take effect until you reload the model. MySettingsTab - + Restore Defaults 恢复初始化 - + Restores settings dialog to a default state 将设置对话框恢复为默认状态 @@ -2201,12 +2212,12 @@ NOTE: Does not take effect until you reload the model. NetworkDialog - + Contribute data to the GPT4All Opensource Datalake. 向GPT4All开源数据湖贡献数据 - + By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake. 
@@ -2219,47 +2230,47 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O 注意:通过启用此功能,您将把数据发送到 GPT4All 开源数据湖。启用此功能后,您不应该期望聊天隐私。但是,如果您愿意,您应该期望可选的归因。您的聊天数据将公开供任何人下载,并将被 Nomic AI 用于改进未来的 GPT4All 模型。Nomic AI 将保留与您的数据相关的所有归因信息,并且您将被视为使用您的数据的任何 GPT4All 模型发布的贡献者! - + Terms for opt-in 选择加入的条款 - + Describes what will happen when you opt-in 描述选择加入时会发生的情况 - + Please provide a name for attribution (optional) 填写名称属性 (可选) - + Attribution (optional) 属性 (可选) - + Provide attribution 提供属性 - + Enable 启用 - + Enable opt-in 启用选择加入 - + Cancel 取消 - + Cancel opt-in 取消加入 @@ -2267,17 +2278,17 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O NewVersionDialog - + New version is available 新版本可选 - + Update 更新 - + Update to new version 更新到新版本 @@ -2285,17 +2296,17 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O PopupDialog - + Reveals a shortlived help balloon 显示一个短暂的帮助气球 - + Busy indicator 繁忙程度 - + Displayed when the popup is showing busy 在弹出窗口显示忙碌时显示 @@ -2310,28 +2321,28 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O SettingsView - - + + Settings 设置 - + Contains various application settings 包含各种应用程序设置 - + Application 应用 - + Model 模型 - + LocalDocs 本地文档 @@ -2339,7 +2350,7 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O StartupDialog - + Welcome! 欢迎! @@ -2354,7 +2365,7 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O ### 贡献者 - + ### Release notes %1### Contributors %2 @@ -2363,17 +2374,17 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O %2 - + Release notes 发布日志 - + Release notes for this version 本版本发布日志 - + ### Opt-ins for anonymous usage analytics and datalake By enabling these features, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. 
@@ -2398,71 +2409,71 @@ model release that uses your data! 模型发布的贡献者! - + Terms for opt-in 选择加入选项 - + Describes what will happen when you opt-in 描述选择加入时会发生的情况 - - + + Opt-in for anonymous usage statistics 允许选择加入匿名使用统计数据 - - + + Yes - + Allow opt-in for anonymous usage statistics 允许选择加入匿名使用统计数据 - - + + No - + Opt-out for anonymous usage statistics 退出匿名使用统计数据 - + Allow opt-out for anonymous usage statistics 允许选择退出匿名使用统计数据 - - + + Opt-in for network 加入网络 - + Allow opt-in for network 允许选择加入网络 - + Allow opt-in anonymous sharing of chats to the GPT4All Datalake 允许选择加入匿名共享聊天至 GPT4All 数据湖 - + Opt-out for network 取消网络 - + Allow opt-out anonymous sharing of chats to the GPT4All Datalake 允许选择退出将聊天匿名共享至 GPT4All 数据湖 @@ -2470,23 +2481,23 @@ model release that uses your data! SwitchModelDialog - + <b>Warning:</b> changing the model will erase the current conversation. Do you wish to continue? <b>警告:</b> 更改模型将删除当前对话。您想继续吗? - + Continue 继续 - + Continue with model loading 模型载入时继续 - - + + Cancel 取消 @@ -2494,32 +2505,32 @@ model release that uses your data! ThumbsDownDialog - + Please edit the text below to provide a better response. (optional) 请编辑下方文本以提供更好的回复。(可选) - + Please provide a better response... 提供更好回答... - + Submit 提交 - + Submits the user's response 提交用户响应 - + Cancel 取消 - + Closes the response dialog 关闭的对话 @@ -2583,125 +2594,125 @@ model release that uses your data! 检查链接 <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> 寻求. - + GPT4All v%1 GPT4All v%1 - + <h3>Encountered an error starting up:</h3><br><i>"Incompatible hardware detected."</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. 
The only solution at this time is to upgrade your hardware to a more modern CPU.<br><br>See here for more information: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> <h3>启动时遇到错误:</h3><br><i>“检测到不兼容的硬件。”</i><br><br>很遗憾,您的 CPU 不满足运行此程序的最低要求。特别是,它不支持此程序成功运行现代大型语言模型所需的 AVX 内在函数。目前唯一的解决方案是将您的硬件升级到更现代的 CPU。<br><br>有关更多信息,请参阅此处:<a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions>>https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> - + <h3>Encountered an error starting up:</h3><br><i>"Inability to access settings file."</i><br><br>Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help. <h3>启动时遇到错误:</h3><br><i>“无法访问设置文件。”</i><br><br>不幸的是,某些东西阻止程序访问设置文件。这可能是由于设置文件所在的本地应用程序配置目录中的权限不正确造成的。请查看我们的<a href="https://discord.gg/4M2QFmTt2k">discord 频道</a> 以获取帮助。 - + Connection to datalake failed. 链接数据湖失败 - + Saving chats. 
保存对话 - + Network dialog 网络对话 - + opt-in to share feedback/conversations 选择加入以共享反馈/对话 - + Home view 主页 - + Home view of application 主页 - + Home 主页 - + Chat view 对话视图 - + Chat view to interact with models 聊天视图可与模型互动 - + Chats 对话 - - + + Models 模型 - + Models view for installed models 已安装模型的页面 - - + + LocalDocs 本地文档 - + LocalDocs view to configure and use local docs LocalDocs视图可配置和使用本地文档 - - + + Settings 设置 - + Settings view for application configuration 设置页面 - + The datalake is enabled 数据湖已开启 - + Using a network model 使用联网模型 - + Server mode is enabled 服务器模式已开 - + Installed models 安装模型 - + View of installed models 查看已安装模型 diff --git a/gpt4all-chat/translations/gpt4all_zh_TW.ts b/gpt4all-chat/translations/gpt4all_zh_TW.ts index f0fd630e272c..e6473e0ef344 100644 --- a/gpt4all-chat/translations/gpt4all_zh_TW.ts +++ b/gpt4all-chat/translations/gpt4all_zh_TW.ts @@ -4,62 +4,62 @@ AddCollectionView - + ← Existing Collections ← 現有收藏 - + Add Document Collection 新增收藏文件 - + Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings. 新增一個含有純文字檔案、PDF 與 Markdown 文件的資料夾。可在設定上增加文件副檔名。 - + Please choose a directory 請選擇一個資料夾 - + Name 名稱 - + Collection name... 收藏名稱...... - + Name of the collection to add (Required) 新增的收藏名稱(必填) - + Folder 資料夾 - + Folder path... 資料夾路徑...... - + Folder path to documents (Required) 文件所屬的資料夾路徑(必填) - + Browse 瀏覽 - + Create Collection 建立收藏 @@ -67,289 +67,289 @@ AddModelView - + ← Existing Models ← 現有模型 - + Explore Models 探索模型 - + Discover and download models by keyword search... 透過關鍵字搜尋探索並下載模型...... 
- + Text field for discovering and filtering downloadable models 用於探索與過濾可下載模型的文字字段 - + Searching · %1 搜尋 · %1 - + Initiate model discovery and filtering 探索與過濾模型 - + Triggers discovery and filtering of models 觸發探索與過濾模型 - + Default 預設 - + Likes - + Downloads 下載次數 - + Recent 最新 - + Sort by: %1 排序依據:%1 - + Asc 升序 - + Desc 降序 - + Sort dir: %1 排序順序:%1 - + None - + Limit: %1 上限:%1 - + Network error: could not retrieve %1 網路錯誤:無法取得 %1 - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">錯誤</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">警告:不推薦在您的硬體上運作。模型需要比較多的記憶體(%1 GB),但您的系統記憶體空間不足(%2)。</strong></font> - + %1 GB %1 GB - - + + ? - - + + Busy indicator 參考自 https://terms.naer.edu.tw 忙線指示器 - + Displayed when the models request is ongoing 當模型請求正在進行時顯示 - + Model file 模型檔案 - + Model file to be downloaded 即將下載的模型檔案 - + Description 描述 - + File description 檔案描述 - + Cancel 取消 - + Resume 恢復 - + Download 下載 - + Stop/restart/start the download 停止/重啟/開始下載 - + Remove 移除 - + Remove model from filesystem 從檔案系統移除模型 - - + + Install 安裝 - + Install online model 安裝線上模型 - + Describes an error that occurred when downloading 解釋下載時發生的錯誤 - + Error for incompatible hardware 錯誤,不相容的硬體 - + Download progressBar 下載進度條 - + Shows the progress made in the download 顯示下載進度 - + Download speed 下載速度 - + Download speed in bytes/kilobytes/megabytes per second 下載速度每秒 bytes/kilobytes/megabytes - + Calculating... 計算中...... - - - - + + + + Whether the file hash is being calculated 是否正在計算檔案雜湊 - + Displayed when the file hash is being calculated 計算檔案雜湊值時顯示 - + ERROR: $API_KEY is empty. 錯誤:$API_KEY 未填寫。 - + enter $API_KEY 請輸入 $API_KEY - + ERROR: $BASE_URL is empty. 錯誤:$BASE_URL 未填寫。 - + enter $BASE_URL 請輸入 $BASE_URL - + ERROR: $MODEL_NAME is empty. 
錯誤:$MODEL_NAME 未填寫。 - + enter $MODEL_NAME 請輸入 $MODEL_NAME - + File size 檔案大小 - + RAM required 所需的記憶體 - + Parameters 參數 - + Quant 量化 - + Type 類型 @@ -357,22 +357,22 @@ ApplicationSettings - + Application 應用程式 - + Network dialog 資料湖泊計畫對話視窗 - + opt-in to share feedback/conversations 分享回饋/對話計畫 - + ERROR: Update system could not find the MaintenanceTool used<br> to check for updates!<br><br> Did you install this application using the online installer? If so,<br> @@ -387,223 +387,223 @@ 如果您無法順利啟動,您可能得重新安裝本應用程式。 - + Error dialog 錯誤對話視窗 - + Application Settings 應用程式設定 - + General 一般 - + Theme 主題 - + The application color scheme. 應用程式的配色方案。 - + Dark 暗色 - + Light 亮色 - + LegacyDark 傳統暗色 - + Font Size 字體大小 - + The size of text in the application. 應用程式中的字體大小。 - + Small - + Medium - + Large - + Language and Locale 語言與區域設定 - + The language and locale you wish to use. 您希望使用的語言與區域設定。 - + System Locale 系統語系 - + Device 裝置 - + Default Model 預設模型 - + The preferred model for new chats. Also used as the local server fallback. 用於新交談的預設模型。也用於作為本機伺服器後援使用。 - + Suggestion Mode 建議模式 - + When chatting with LocalDocs 當使用「我的文件」交談時 - + Whenever possible 視情況允許 - + Never 永不 - + Generate suggested follow-up questions at the end of responses. 在回覆末尾生成後續建議的問題。 - + The compute device used for text generation. 用於生成文字的計算裝置。 - - + + Application default 應用程式預設值 - + Download Path 下載路徑 - + Where to store local models and the LocalDocs database. 儲存本機模型與「我的文件」資料庫的位置。 - + Browse 瀏覽 - + Choose where to save model files 選擇儲存模型檔案的位置 - + Enable Datalake 啟用資料湖泊 - + Send chats and feedback to the GPT4All Open-Source Datalake. 將交談與回饋傳送到 GPT4All 開放原始碼資料湖泊。 - + Advanced 進階 - + CPU Threads 中央處理器(CPU)線程 - + The number of CPU threads used for inference and embedding. 用於推理與嵌入的中央處理器線程數。 - + Save Chat Context 儲存交談語境 - + Save the chat model's state to disk for faster loading. WARNING: Uses ~2GB per chat. 將交談模型的狀態儲存到磁碟以加快載入速度。警告:每次交談使用約 2GB。 - + Enable Local Server 啟用本機伺服器 - + Expose an OpenAI-Compatible server to localhost. 
WARNING: Results in increased resource usage. 將 OpenAI 相容伺服器公開給本機。警告:導致資源使用增加。 - + API Server Port API 伺服器埠口 - + The port to use for the local server. Requires restart. 用於本機伺服器的埠口。需要重新啟動。 - + Check For Updates 檢查更新 - + Manually check for an update to GPT4All. 手動檢查 GPT4All 的更新。 - + Updates 更新 @@ -611,13 +611,13 @@ Chat - - + + New Chat 新的交談 - + Server Chat 伺服器交談 @@ -625,12 +625,12 @@ ChatAPIWorker - + ERROR: Network error occurred while connecting to the API server 錯誤:網路錯誤,無法連線到目標 API 伺服器 - + ChatAPIWorker::handleFinished got HTTP Error %1 %2 ChatAPIWorker::handleFinished 遇到一個 HTTP 錯誤 %1 %2 @@ -638,62 +638,62 @@ ChatDrawer - + Drawer 側邊欄 - + Main navigation drawer 主要導航側邊欄 - + + New Chat + 新的交談 - + Create a new chat 建立新的交談 - + Select the current chat or edit the chat when in edit mode 選擇目前交談或在編輯模式下編輯交談 - + Edit chat name 修改對話名稱 - + Save chat name 儲存對話名稱 - + Delete chat 刪除對話 - + Confirm chat deletion 確定刪除對話 - + Cancel chat deletion 取消刪除對話 - + List of chats 交談列表 - + List of chats in the drawer dialog 側邊欄對話視窗的交談列表 @@ -701,32 +701,32 @@ ChatListModel - + TODAY 今天 - + THIS WEEK 這星期 - + THIS MONTH 這個月 - + LAST SIX MONTHS 前六個月 - + THIS YEAR 今年 - + LAST YEAR 去年 @@ -734,329 +734,329 @@ ChatView - + <h3>Warning</h3><p>%1</p> <h3>警告</h3><p>%1</p> - + Switch model dialog 切換模型對話視窗 - + Warn the user if they switch models, then context will be erased 警告使用者如果切換模型,則語境將被刪除 - + Conversation copied to clipboard. 對話已複製到剪貼簿。 - + Code copied to clipboard. 程式碼已複製到剪貼簿。 - + Chat panel 交談面板 - + Chat panel with options 具有選項的交談面板 - + Reload the currently loaded model 重新載入目前已載入的模型 - + Eject the currently loaded model 彈出目前載入的模型 - + No model installed. 沒有已安裝的模型。 - + Model loading error. 模型載入時發生錯誤。 - + Waiting for model... 等待模型中...... - + Switching context... 切換語境中...... - + Choose a model... 選擇一個模型...... 
- + Not found: %1 不存在:%1 - - + + Reload · %1 重新載入 · %1 - + Loading · %1 載入中 · %1 - + Load · %1 (default) → 載入 · %1 (預設) → - + The top item is the current model 最上面的那項是目前使用的模型 - - + + LocalDocs 我的文件 - + Add documents 新增文件 - + add collections of documents to the chat 將文件集合新增至交談中 - + Load the default model 載入預設模型 - + Loads the default model which can be changed in settings 預設模型可於設定中變更 - + No Model Installed 沒有已安裝的模型 - + GPT4All requires that you install at least one model to get started GPT4All 要求您至少安裝一個 模型開始 - + Install a Model 安裝一個模型 - + Shows the add model view 顯示新增模型視圖 - + Conversation with the model 與模型對話 - + prompt / response pairs from the conversation 對話中的提示詞 / 回覆組合 - + GPT4All GPT4All - + You - + response stopped ... 回覆停止...... - + retrieving localdocs: %1 ... 檢索本機文件中:%1 ...... - + searching localdocs: %1 ... 搜尋本機文件中:%1 ...... - + processing ... 處理中...... - + generating response ... 生成回覆...... - + generating questions ... 生成問題...... - - + + Copy 複製 - + Copy Message 複製訊息 - + Disable markdown 停用 Markdown - + Enable markdown 啟用 Markdown - + Thumbs up - + Gives a thumbs up to the response 對這則回覆比讚 - + Thumbs down 倒讚 - + Opens thumbs down dialog 開啟倒讚對話視窗 - + Suggested follow-ups 後續建議 - + Erase and reset chat session 刪除並重置交談會話 - + Copy chat session to clipboard 複製交談會議到剪貼簿 - + Redo last chat response 復原上一個交談回覆 - + Stop generating 停止生成 - + Stop the current response generation 停止當前回覆生成 - + Reloads the model 重新載入模型 - + <h3>Encountered an error loading model:</h3><br><i>"%1"</i><br><br>Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:<br><ul><li>Ensure the model file has a compatible format and type<li>Check the model file is complete in the download folder<li>You can find the download folder in the settings dialog<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum<li>Read more about what models are supported in our <a href="https://docs.gpt4all.io/">documentation</a> for the gui<li>Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help <h3>載入模型時發生錯誤:</h3><br><i>"%1"</i><br><br>導致模型載入失敗的原因可能有很多種,但絕大多數的原因是檔案格式損毀、下載的檔案不完整、檔案類型錯誤、系統RAM空間不足或不相容的模型類型。這裡有些建議可供疑難排解:<br><ul><li>確保使用的模型是相容的格式與類型<li>檢查位於下載資料夾的檔案是否完整<li>您可以從設定中找到您所設定的「下載資料夾路徑」<li>如果您有側載模型,請利用 md5sum 等工具確保您的檔案是完整的<li>想了解更多關於我們所支援的模型資訊,煩請詳閱<a href="https://docs.gpt4all.io/">本文件</a>。<li>歡迎洽詢我們的 <a href="https://discord.gg/4M2QFmTt2k">Discord 伺服器</a> 以尋求幫助 - + restoring from text ... 從文字中恢復...... - + %n Source(s) %n 來源 - + Send a message... 傳送一則訊息...... - + Load a model to continue... 載入模型以繼續...... - + Send messages/prompts to the model 向模型傳送訊息/提示詞 - + Cut 剪下 - + Paste 貼上 - + Select All 全選 - + Send message 傳送訊息 - + Sends the message/prompt contained in textfield to the model 將文字欄位中包含的訊息/提示詞傳送到模型 @@ -1064,36 +1064,36 @@ model to get started CollectionsDrawer - + Warning: searching collections while indexing can return incomplete results 警告:在索引時搜尋收藏可能會傳回不完整的結果 - + %n file(s) %n 個檔案 - + %n word(s) %n 個字 - + Updating 更新中 - + + Add Docs + 新增文件 - + Select a collection to make it available to the chat model. 選擇一個收藏以使其可供交談模型使用。 @@ -1101,37 +1101,37 @@ model to get started Download - + Model "%1" is installed successfully. 模型「%1」已安裝成功。 - + ERROR: $MODEL_NAME is empty. 錯誤:$MODEL_NAME 未填寫。 - + ERROR: $API_KEY is empty. 錯誤:$API_KEY 未填寫。 - + ERROR: $BASE_URL is invalid. 錯誤:$BASE_URL 無效。 - + ERROR: Model "%1 (%2)" is conflict. 錯誤:模型「%1 (%2)」發生衝突。 - + Model "%1 (%2)" is installed successfully. 模型「%1(%2)」已安裝成功。 - + Model "%1" is removed. 
模型「%1」已移除。 @@ -1139,92 +1139,92 @@ model to get started HomeView - + Welcome to GPT4All 歡迎使用 GPT4All - + The privacy-first LLM chat application 隱私第一的大型語言模型交談應用程式 - + Start chatting 開始交談 - + Start Chatting 開始交談 - + Chat with any LLM 與任何大型語言模型交談 - + LocalDocs 我的文件 - + Chat with your local files 使用「我的文件」來交談 - + Find Models 搜尋模型 - + Explore and download models 瀏覽與下載模型 - + Latest news 最新消息 - + Latest news from GPT4All 從 GPT4All 來的最新消息 - + Release Notes 版本資訊 - + Documentation 文件 - + Discord Discord - + X (Twitter) X (Twitter) - + Github Github - + nomic.ai nomic.ai - + Subscribe to Newsletter 訂閱電子報 @@ -1232,117 +1232,117 @@ model to get started LocalDocsSettings - + LocalDocs 我的文件 - + LocalDocs Settings 我的文件設定 - + Indexing 索引中 - + Allowed File Extensions 允許的副檔名 - + Comma-separated list. LocalDocs will only attempt to process files with these extensions. 以逗號分隔的列表。「我的文件」將僅嘗試處理具有這些副檔名的檔案。 - + Embedding 嵌入 - + Use Nomic Embed API 使用 Nomic 嵌入 API - + Embed documents using the fast Nomic API instead of a private local model. Requires restart. 使用快速的 Nomic API 而不是本機私有模型嵌入文件。需要重新啟動。 - + Nomic API Key Nomic API 金鑰 - + API key to use for Nomic Embed. Get one from the Atlas <a href="https://atlas.nomic.ai/cli-login">API keys page</a>. Requires restart. 用於 Nomic Embed 的 API 金鑰。從 Atlas <a href="https://atlas.nomic.ai/cli-login">API 金鑰頁面</a>取得一個。需要重新啟動。 - + Embeddings Device 嵌入裝置 - + The compute device used for embeddings. Requires restart. 用於嵌入的計算裝置。需要重新啟動。 - + Application default 應用程式預設值 - + Display 顯示 - + Show Sources 查看來源 - + Display the sources used for each response. 顯示每則回覆所使用的來源。 - + Advanced 進階 - + Warning: Advanced usage only. 警告:僅限進階使用。 - + Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. More info <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">here</a>. 
設定太大的數值可能會導致「我的文件」處理失敗、反應速度極慢或根本無法回覆。簡單地說,這會將 {N 個字元 x N 個片段} 被添加到模型的語境視窗中。更多資訊<a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">此處</a>。 - + Document snippet size (characters) 文件片段大小(字元) - + Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation. 每個文件片段的字元數。較大的數字會增加實際反應的可能性,但也會導致生成速度變慢。 - + Max document snippets per prompt 每個提示詞的最大文件片段 - + Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation. 新增至提示詞語境中的檢索到的文件片段的最大 N 個符合的項目。較大的數字會增加實際反應的可能性,但也會導致生成速度變慢。 @@ -1350,151 +1350,151 @@ model to get started LocalDocsView - + LocalDocs 我的文件 - + Chat with your local files 使用「我的文件」來交談 - + + Add Collection + 新增收藏 - + <h3>ERROR: The LocalDocs database cannot be accessed or is not valid.</h3><br><i>Note: You will need to restart after trying any of the following suggested fixes.</i><br><ul><li>Make sure that the folder set as <b>Download Path</b> exists on the file system.</li><li>Check ownership as well as read and write permissions of the <b>Download Path</b>.</li><li>If there is a <b>localdocs_v2.db</b> file, check its ownership and read/write permissions, too.</li></ul><br>If the problem persists and there are any 'localdocs_v*.db' files present, as a last resort you can<br>try backing them up and removing them. You will have to recreate your collections, however. 
<h3>錯誤:「我的文件」資料庫已無法存取或已損壞。</h3><br><i>提醒:執行完以下任何疑難排解的動作後,請務必重新啟動應用程式。</i><br><ul><li>請確保<b>「下載路徑」</b>所指向的資料夾確實存在於檔案系統當中。</li><li>檢查 <b>「下載路徑」</b>所指向的資料夾,確保其「擁有者」為您本身,以及確保您對該資料夾擁有讀寫權限。</li><li>如果該資料夾內存在一份名為 <b>localdocs_v2.db</b> 的檔案,請同時確保您對其擁有讀寫權限。</li></ul><br>如果問題依舊存在,且該資料夾內存在與「localdocs_v*.db」名稱相關的檔案,請嘗試備份並移除它們。<br>雖然這樣一來,您恐怕得著手重建您的收藏,但這將或許能夠解決這份錯誤。 - + No Collections Installed 沒有已安裝的收藏 - + Install a collection of local documents to get started using this feature 安裝本機文件收藏以開始使用此功能 - + + Add Doc Collection + 新增文件收藏 - + Shows the add model view 查看新增的模型視圖 - + Indexing progressBar 索引進度條 - + Shows the progress made in the indexing 顯示索引進度 - + ERROR 錯誤 - + INDEXING 索引中 - + EMBEDDING 嵌入中 - + REQUIRES UPDATE 必須更新 - + READY 已就緒 - + INSTALLING 安裝中 - + Indexing in progress 正在索引 - + Embedding in progress 正在嵌入 - + This collection requires an update after version change 該收藏需要在版本變更後更新 - + Automatically reindexes upon changes to the folder 若資料夾有變動,會自動重新索引 - + Installation in progress 正在安裝中 - + % % - + %n file(s) %n 個檔案 - + %n word(s) %n 個字 - + Remove 移除 - + Rebuild 重建 - + Reindex this folder from scratch. This is slow and usually not needed. 重新索引該資料夾。這將會耗費許多時間並且通常不太需要這樣做。 - + Update 更新 - + Update the collection to the new version. This is a slow operation. 
更新收藏。這將會耗費許多時間。 @@ -1502,67 +1502,78 @@ model to get started ModelList - + + + cannot open "%1": %2 + + + + + cannot create "%1": %2 + + + + %1 (%2) %1(%2) - + <strong>OpenAI-Compatible API Model</strong><br><ul><li>API Key: %1</li><li>Base URL: %2</li><li>Model Name: %3</li></ul> <strong>OpenAI API 相容模型</strong><br><ul><li>API 金鑰:%1</li><li>基底 URL:%2</li><li>模型名稱:%3</li></ul> - + <ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with OpenAI</li><li>You can apply for an API key <a href="https://platform.openai.com/account/api-keys">here.</a></li> <ul><li>需要個人的 OpenAI API 金鑰。</li><li>警告:這將會傳送您的交談紀錄到 OpenAI</li><li>您的 API 金鑰將被儲存在硬碟上</li><li>它只被用於與 OpenAI 進行通訊</li><li>您可以在<a href="https://platform.openai.com/account/api-keys">此處</a>申請一個 API 金鑰。</li> - + <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1 <strong>OpenAI 的 ChatGPT 模型 GPT-3.5 Turbo</strong><br> %1 - + <br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info. 
<br><br><i>* 即使您已向 OpenAI 付費購買了 ChatGPT 的 GPT-4 模型使用權,但這也不能保證您能擁有 API 金鑰的使用權限。請聯繫 OpenAI 以查閱更多資訊。 - + <strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2 <strong>OpenAI 的 ChatGPT 模型 GPT-4</strong><br> %1 %2 - + <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> <ul><li>需要個人的 Mistral API 金鑰。</li><li>警告:這將會傳送您的交談紀錄到 Mistral!</li><li>您的 API 金鑰將被儲存在硬碟上</li><li>它只被用於與 Mistral 進行通訊</li><li>您可以在<a href="https://console.mistral.ai/user/api-keys">此處</a>申請一個 API 金鑰。</li> - + <strong>Mistral Tiny model</strong><br> %1 <strong>Mistral 迷你模型</strong><br> %1 - + <strong>Mistral Small model</strong><br> %1 <strong>Mistral 小型模型</strong><br> %1 - + <strong>Mistral Medium model</strong><br> %1 <strong>Mistral 中型模型</strong><br> %1 - + <ul><li>Requires personal API key and the API base URL.</li><li>WARNING: Will send your chats to the OpenAI-compatible API Server you specified!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with the OpenAI-compatible API Server</li> <ul><li>需要個人的 API 金鑰和 API 的基底 URL(Base URL)。</li><li>警告:這將會傳送您的交談紀錄到您所指定的 OpenAI API 相容伺服器</li><li>您的 API 金鑰將被儲存在硬碟上</li><li>它只被用於與其 OpenAI API 相容伺服器進行通訊</li> - + <strong>Connect to OpenAI-compatible API server</strong><br> %1 <strong>連線到 OpenAI API 相容伺服器</strong><br> %1 - + <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> <strong>模型作者:%1</strong><br><ul><li>發佈日期:%2<li>累積讚數:%3 個讚<li>下載次數:%4 次<li>更多資訊請查閱<a href="https://huggingface.co/%5">此處</a>。</ul> @@ -1570,92 +1581,92 @@ model to get started ModelSettings - + Model 模型 - + Model Settings 模型設定 - + Clone 複製 - + Remove 移除 - + Name 名稱 - + Model File 
模型檔案 - + System Prompt 系統提示詞 - + Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens. 在每個對話的開頭加上前綴。必須包含適當的構建符元(framing tokens)。 - + Prompt Template 提示詞模板 - + The template that wraps every prompt. 包裝每個提示詞的模板。 - + Must contain the string "%1" to be replaced with the user's input. 必須包含要替換為使用者輸入的字串「%1」。 - + Chat Name Prompt 交談名稱提示詞 - + Prompt used to automatically generate chat names. 用於自動生成交談名稱的提示詞。 - + Suggested FollowUp Prompt 後續建議提示詞 - + Prompt used to generate suggested follow-up questions. 用於生成後續建議問題的提示詞。 - + Context Length 語境長度(Context Length) - + Number of input and output tokens the model sees. 模型看見的輸入與輸出的符元數量。 - + Maximum combined prompt/response tokens before information is lost. Using more context than the model was trained on will yield poor results. NOTE: Does not take effect until you reload the model. @@ -1664,128 +1675,128 @@ NOTE: Does not take effect until you reload the model. 注意:重新載入模型後才會生效。 - + Temperature 語境溫度(Temperature) - + Randomness of model output. Higher -> more variation. 模型輸出的隨機性。更高 -> 更多變化。 - + Temperature increases the chances of choosing less likely tokens. NOTE: Higher temperature gives more creative but less predictable outputs. 語境溫度會提高選擇不容易出現的符元機率。 注意:較高的語境溫度會生成更多創意,但輸出的可預測性會相對較差。 - + Top-P 核心採樣(Top-P) - + Nucleus Sampling factor. Lower -> more predictable. 核心採樣因子。更低 -> 更可預測。 - + Only the most likely tokens up to a total probability of top_p can be chosen. NOTE: Prevents choosing highly unlikely tokens. 只選擇總機率約為核心採樣,最有可能性的符元。 注意:用於避免選擇不容易出現的符元。 - + Min-P 最小符元機率(Min-P) - + Minimum token probability. Higher -> more predictable. 最小符元機率。更高 -> 更可預測。 - + Sets the minimum relative probability for a token to be considered. 設定要考慮的符元的最小相對機率。 - + Top-K 高頻率採樣機率(Top-K) - + Size of selection pool for tokens. 符元選擇池的大小。 - + Only the top K most likely tokens will be chosen from. 只選擇前 K 個最有可能性的符元。 - + Max Length 最大長度(Max Length) - + Maximum response length, in tokens. 
最大響應長度(以符元為單位)。 - + Prompt Batch Size 提示詞批次大小(Prompt Batch Size) - + The batch size used for prompt processing. 用於即時處理的批量大小。 - + Amount of prompt tokens to process at once. NOTE: Higher values can speed up reading prompts but will use more RAM. 一次處理的提示詞符元數量。 注意:較高的值可以加快讀取提示詞的速度,但會使用比較多的記憶體。 - + Repeat Penalty 重複處罰(Repeat Penalty) - + Repetition penalty factor. Set to 1 to disable. 重複懲罰因子。設定為 1 以停用。 - + Repeat Penalty Tokens 重複懲罰符元(Repeat Penalty Tokens) - + Number of previous tokens used for penalty. 之前用於懲罰的符元數量。 - + GPU Layers 圖形處理器負載層(GPU Layers) - + Number of model layers to load into VRAM. 要載入到顯示記憶體中的模型層數。 - + How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model. Lower values increase CPU load and RAM usage, and make inference slower. NOTE: Does not take effect until you reload the model. @@ -1797,218 +1808,218 @@ NOTE: Does not take effect until you reload the model. ModelsView - + No Models Installed 沒有已安裝的模型 - + Install a model to get started using GPT4All 安裝模型以開始使用 GPT4All - - + + + Add Model + 新增模型 - + Shows the add model view 顯示新增模型視圖 - + Installed Models 已安裝的模型 - + Locally installed chat models 本機已安裝的交談模型 - + Model file 模型檔案 - + Model file to be downloaded 即將下載的模型檔案 - + Description 描述 - + File description 檔案描述 - + Cancel 取消 - + Resume 恢復 - + Stop/restart/start the download 停止/重啟/開始下載 - + Remove 移除 - + Remove model from filesystem 從檔案系統移除模型 - - + + Install 安裝 - + Install online model 安裝線上模型 - + <strong><font size="1"><a href="#error">Error</a></strong></font> <strong><font size="1"><a href="#error">錯誤</a></strong></font> - + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> <strong><font size="2">警告:不推薦在您的硬體上運作。模型需要比較多的記憶體(%1 GB),但您的系統記憶體空間不足(%2)。</strong></font> - + %1 GB %1 GB - + ? 
- + Describes an error that occurred when downloading 解釋下載時發生的錯誤 - + Error for incompatible hardware 錯誤,不相容的硬體 - + Download progressBar 下載進度條 - + Shows the progress made in the download 顯示下載進度 - + Download speed 下載速度 - + Download speed in bytes/kilobytes/megabytes per second 下載速度每秒 bytes/kilobytes/megabytes - + Calculating... 計算中...... - - - - + + + + Whether the file hash is being calculated 是否正在計算檔案雜湊 - + Busy indicator 參考自 https://terms.naer.edu.tw 忙線指示器 - + Displayed when the file hash is being calculated 計算檔案雜湊值時顯示 - + ERROR: $API_KEY is empty. 錯誤:$API_KEY 未填寫。 - + enter $API_KEY 請輸入 $API_KEY - + ERROR: $BASE_URL is empty. 錯誤:$BASE_URL 未填寫。 - + enter $BASE_URL 請輸入 $BASE_URL - + ERROR: $MODEL_NAME is empty. 錯誤:$MODEL_NAME 未填寫。 - + enter $MODEL_NAME 請輸入 $MODEL_NAME - + File size 檔案大小 - + RAM required 所需的記憶體 - + Parameters 參數 - + Quant 量化 - + Type 類型 @@ -2016,12 +2027,12 @@ NOTE: Does not take effect until you reload the model. MyFancyLink - + Fancy link 精緻網址 - + A stylized link 個性化網址 @@ -2029,7 +2040,7 @@ NOTE: Does not take effect until you reload the model. MySettingsStack - + Please choose a directory 請選擇一個資料夾 @@ -2037,12 +2048,12 @@ NOTE: Does not take effect until you reload the model. MySettingsTab - + Restore Defaults 恢復預設值 - + Restores settings dialog to a default state 恢復設定對話視窗到預設狀態 @@ -2050,12 +2061,12 @@ NOTE: Does not take effect until you reload the model. NetworkDialog - + Contribute data to the GPT4All Opensource Datalake. 貢獻資料到 GPT4All 的開放原始碼資料湖泊。 - + By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake. 
@@ -2073,47 +2084,47 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將被認可為任何使用您的資料的 GPT4All 模型版本的貢獻者! - + Terms for opt-in 計畫規範 - + Describes what will happen when you opt-in 解釋當您加入計畫後,會發生什麼事情 - + Please provide a name for attribution (optional) 請提供署名(非必填) - + Attribution (optional) 署名(非必填) - + Provide attribution 提供署名 - + Enable 啟用 - + Enable opt-in 加入計畫 - + Cancel 取消 - + Cancel opt-in 拒絕計畫 @@ -2121,17 +2132,17 @@ Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將 NewVersionDialog - + New version is available 發現新版本 - + Update 更新 - + Update to new version 更新版本 @@ -2139,18 +2150,18 @@ Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將 PopupDialog - + Reveals a shortlived help balloon 呼叫提示小幫手 - + Busy indicator 參考自 https://terms.naer.edu.tw 忙線指示器 - + Displayed when the popup is showing busy 當彈出視窗忙碌時顯示 @@ -2158,28 +2169,28 @@ Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將 SettingsView - - + + Settings 設定 - + Contains various application settings 內含多種應用程式設定 - + Application 應用程式 - + Model 模型 - + LocalDocs 我的文件 @@ -2187,22 +2198,22 @@ Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將 StartupDialog - + Welcome! 歡迎使用! - + Release notes 版本資訊 - + Release notes for this version 這個版本的版本資訊 - + ### Opt-ins for anonymous usage analytics and datalake By enabling these features, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. @@ -2230,35 +2241,35 @@ model release that uses your data! Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將被認可為任何使用您的資料的 GPT4All 模型版本的貢獻者! 
- + Terms for opt-in 計畫規範 - + Describes what will happen when you opt-in 解釋當您加入計畫後,會發生什麼事情 - - + + Yes - - + + No - - + + Opt-in for anonymous usage statistics 匿名使用統計計畫 - + ### Release notes %1### Contributors %2 @@ -2267,43 +2278,43 @@ Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將 %2 - + Allow opt-in for anonymous usage statistics 加入匿名使用統計計畫 - + Opt-out for anonymous usage statistics 退出匿名使用統計計畫 - + Allow opt-out for anonymous usage statistics 終止並退出匿名使用統計計畫 - - + + Opt-in for network 資料湖泊計畫 - + Allow opt-in for network 加入資料湖泊計畫 - + Opt-out for network 退出資料湖泊計畫 - + Allow opt-in anonymous sharing of chats to the GPT4All Datalake 開始將交談內容匿名分享到 GPT4All 資料湖泊 - + Allow opt-out anonymous sharing of chats to the GPT4All Datalake 終止將交談內容匿名分享到 GPT4All 資料湖泊 @@ -2311,23 +2322,23 @@ Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將 SwitchModelDialog - + <b>Warning:</b> changing the model will erase the current conversation. Do you wish to continue? <b>警告:</b> 變更模型將會清除目前對話內容。您真的想要繼續嗎? - + Continue 繼續 - + Continue with model loading 繼續載入模型 - - + + Cancel 取消 @@ -2335,32 +2346,32 @@ Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將 ThumbsDownDialog - + Please edit the text below to provide a better response. (optional) 請編輯以下文字,以提供更好的回覆。(非必填) - + Please provide a better response... 請提供一則更好的回覆...... - + Submit 送出 - + Submits the user's response 送出使用者的回覆 - + Cancel 取消 - + Closes the response dialog 關閉回覆對話視窗 @@ -2368,125 +2379,125 @@ Nomic AI 將保留附加在您的資料上的所有署名訊息,並且您將 main - + GPT4All v%1 GPT4All v%1 - + <h3>Encountered an error starting up:</h3><br><i>"Incompatible hardware detected."</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. 
The only solution at this time is to upgrade your hardware to a more modern CPU.<br><br>See here for more information: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> <h3>啟動時發生錯誤:</h3><br><i>「偵測到不相容的硬體。」</i><br><br>糟糕!您的中央處理器不符合運行所需的最低需求。尤其,它不支援本程式運行現代大型語言模型所需的 AVX 指令集。目前唯一的解決方案,只有更新您的中央處理器及其相關硬體裝置。<br><br>更多資訊請查閱:<a href="https://zh.wikipedia.org/wiki/AVX指令集">AVX 指令集 - 維基百科</a> - + <h3>Encountered an error starting up:</h3><br><i>"Inability to access settings file."</i><br><br>Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help. <h3>啟動時發生錯誤:</h3><br><i>「無法存取設定檔。」</i><br><br>糟糕!有些東西正在阻止程式存取設定檔。這極為可能是由於設定檔所在的本機應用程式設定資料夾中的權限設定不正確所造成的。煩請洽詢我們的 <a href="https://discord.gg/4M2QFmTt2k">Discord 伺服器</a> 以尋求協助。 - + Connection to datalake failed. 連線資料湖泊失敗。 - + Saving chats. 儲存交談。 - + Network dialog 資料湖泊計畫對話視窗 - + opt-in to share feedback/conversations 分享回饋/對話計畫 - + Home view 首頁視圖 - + Home view of application 應用程式首頁視圖 - + Home 首頁 - + Chat view 查看交談 - + Chat view to interact with models 模型互動交談視圖 - + Chats 交談 - - + + Models 模型 - + Models view for installed models 已安裝模型的模型視圖 - - + + LocalDocs 我的文件 - + LocalDocs view to configure and use local docs 用於設定與使用我的文件的「我的文件」視圖 - - + + Settings 設定 - + Settings view for application configuration 應用程式設定視圖 - + The datalake is enabled 資料湖泊已啟用 - + Using a network model 使用一個網路模型 - + Server mode is enabled 伺服器模式已啟用 - + Installed models 已安裝的模型 - + View of installed models 已安裝的模型視圖