From 2fe0f62b9281568198f09314fb76a622df1044aa Mon Sep 17 00:00:00 2001 From: Crowdin Bot Date: Tue, 26 Nov 2024 22:36:04 +0000 Subject: [PATCH] New Crowdin translations by GitHub Action --- i18n/ar/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/ar/data-broker-removals.md | 2 - i18n/ar/tools.md | 12 ++ i18n/bn-IN/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/bn-IN/data-broker-removals.md | 2 - i18n/bn-IN/tools.md | 12 ++ i18n/bn/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/bn/data-broker-removals.md | 2 - i18n/bn/tools.md | 12 ++ i18n/cs/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/cs/data-broker-removals.md | 2 - i18n/cs/tools.md | 12 ++ i18n/de/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/de/data-broker-removals.md | 2 - i18n/de/tools.md | 12 ++ i18n/el/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/el/data-broker-removals.md | 2 - i18n/el/tools.md | 12 ++ i18n/eo/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/eo/data-broker-removals.md | 2 - i18n/eo/tools.md | 12 ++ i18n/es/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/es/data-broker-removals.md | 2 - i18n/es/tools.md | 12 ++ i18n/fa/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/fa/data-broker-removals.md | 2 - i18n/fa/tools.md | 12 ++ i18n/fr/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/fr/data-broker-removals.md | 2 - i18n/fr/tools.md | 12 ++ i18n/he/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/he/data-broker-removals.md | 2 - i18n/he/tools.md | 12 ++ i18n/hi/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/hi/data-broker-removals.md | 2 - i18n/hi/tools.md | 12 ++ i18n/hu/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/hu/data-broker-removals.md | 2 - i18n/hu/tools.md | 12 ++ i18n/id/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/id/data-broker-removals.md | 2 - i18n/id/tools.md | 12 ++ i18n/it/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/it/data-broker-removals.md | 2 - i18n/it/tools.md | 12 ++ i18n/ja/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/ja/data-broker-removals.md | 2 - i18n/ja/tools.md | 12 ++ i18n/ko/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/ko/data-broker-removals.md | 2 - i18n/ko/tools.md | 12 ++ i18n/ku-IQ/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/ku-IQ/data-broker-removals.md | 2 - i18n/ku-IQ/tools.md | 12 ++ i18n/nl/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/nl/data-broker-removals.md | 2 - i18n/nl/tools.md | 12 ++ i18n/pl/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/pl/data-broker-removals.md | 2 - i18n/pl/tools.md | 12 ++ i18n/pt-BR/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/pt-BR/data-broker-removals.md | 2 - i18n/pt-BR/tools.md | 12 ++ i18n/pt/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/pt/data-broker-removals.md | 2 - i18n/pt/tools.md | 12 ++ i18n/ru/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/ru/data-broker-removals.md | 2 - i18n/ru/tools.md | 12 ++ i18n/sv/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/sv/data-broker-removals.md | 2 - i18n/sv/tools.md | 12 ++ i18n/tr/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/tr/data-broker-removals.md | 2 - i18n/tr/tools.md | 12 ++ i18n/uk/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/uk/data-broker-removals.md | 2 - i18n/uk/tools.md | 12 ++ i18n/vi/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/vi/data-broker-removals.md | 2 - i18n/vi/tools.md | 12 ++ i18n/zh-Hant/ai-chat.md | 189 +++++++++++++++++++++++++++ i18n/zh-Hant/data-broker-removals.md | 2 - i18n/zh-Hant/tools.md | 12 ++ i18n/zh/ai-chat.md | 189 +++++++++++++++++++++++++++ 
i18n/zh/data-broker-removals.md | 2 - i18n/zh/tools.md | 12 ++ includes/abbreviations.ar.txt | 6 + includes/abbreviations.bn-IN.txt | 6 + includes/abbreviations.bn.txt | 6 + includes/abbreviations.cs.txt | 6 + includes/abbreviations.de.txt | 6 + includes/abbreviations.el.txt | 6 + includes/abbreviations.eo.txt | 6 + includes/abbreviations.es.txt | 6 + includes/abbreviations.fa.txt | 6 + includes/abbreviations.fr.txt | 6 + includes/abbreviations.he.txt | 6 + includes/abbreviations.hi.txt | 6 + includes/abbreviations.hu.txt | 6 + includes/abbreviations.id.txt | 6 + includes/abbreviations.it.txt | 6 + includes/abbreviations.ja.txt | 6 + includes/abbreviations.ko.txt | 6 + includes/abbreviations.ku-IQ.txt | 6 + includes/abbreviations.nl.txt | 6 + includes/abbreviations.pl.txt | 6 + includes/abbreviations.pt-BR.txt | 6 + includes/abbreviations.pt.txt | 6 + includes/abbreviations.ru.txt | 6 + includes/abbreviations.sv.txt | 6 + includes/abbreviations.tr.txt | 6 + includes/abbreviations.uk.txt | 6 + includes/abbreviations.vi.txt | 6 + includes/abbreviations.zh-Hant.txt | 6 + includes/abbreviations.zh.txt | 6 + 116 files changed, 6003 insertions(+), 58 deletions(-) create mode 100644 i18n/ar/ai-chat.md create mode 100644 i18n/bn-IN/ai-chat.md create mode 100644 i18n/bn/ai-chat.md create mode 100644 i18n/cs/ai-chat.md create mode 100644 i18n/de/ai-chat.md create mode 100644 i18n/el/ai-chat.md create mode 100644 i18n/eo/ai-chat.md create mode 100644 i18n/es/ai-chat.md create mode 100644 i18n/fa/ai-chat.md create mode 100644 i18n/fr/ai-chat.md create mode 100644 i18n/he/ai-chat.md create mode 100644 i18n/hi/ai-chat.md create mode 100644 i18n/hu/ai-chat.md create mode 100644 i18n/id/ai-chat.md create mode 100644 i18n/it/ai-chat.md create mode 100644 i18n/ja/ai-chat.md create mode 100644 i18n/ko/ai-chat.md create mode 100644 i18n/ku-IQ/ai-chat.md create mode 100644 i18n/nl/ai-chat.md create mode 100644 i18n/pl/ai-chat.md create mode 100644 i18n/pt-BR/ai-chat.md create mode 100644 i18n/pt/ai-chat.md create mode 100644 i18n/ru/ai-chat.md create mode 100644 i18n/sv/ai-chat.md create mode 100644 i18n/tr/ai-chat.md create mode 100644 i18n/uk/ai-chat.md create mode 100644 i18n/vi/ai-chat.md create mode 100644 i18n/zh-Hant/ai-chat.md create mode 100644 i18n/zh/ai-chat.md diff --git a/i18n/ar/ai-chat.md b/i18n/ar/ai-chat.md new file mode 100644 index 00000000..af64bd7d --- /dev/null +++ b/i18n/ar/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web. 
+ +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from them. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release and allow you to inspect their training datasets. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information to the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can vary between 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models. + +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). 
Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+<div class="admonition note" markdown>
+<p class="admonition-title">Compatibility Issues</p>
+
+Kobold.cpp might not run on computers without AVX/AVX2 support.
+
+</div>
+
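+To check AVX2 support before installing, you can query the CPU feature flags. This is a small sketch; command availability varies by platform:
+
+```bash
+# Linux: prints "avx2" if the CPU advertises AVX2; no output means unsupported
+grep -o -m1 'avx2' /proc/cpuinfo
+
+# macOS (Intel): AVX2 appears among the leaf-7 CPU features
+sysctl -n machdep.cpu.leaf7_features | grep -o 'AVX2'
+```
+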
+Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone, as sketched below.
+
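+The following launch command is a minimal sketch, not an official invocation: the `--model` and `--remotetunnel` flags are assumptions based on Kobold.cpp's command-line options, and the model filename is illustrative.
+
+```bash
+# Load a local GGUF model, then expose the local chat UI through a temporary tunnel
+python koboldcpp.py --model ./mistral-7b-instruct-q4_k_m.gguf --remotetunnel
+```
+
+### Ollama (CLI)
+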
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its efficient, llama.cpp-based inference engine. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
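+A typical first session looks like the following sketch (model names are examples from Ollama's library):
+
+```bash
+# Download Llama 3.2 (if not already present) and start an interactive chat
+ollama run llama3.2
+
+# List the models currently stored on your machine
+ollama list
+```
+
+### Llamafile
+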
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+Mozilla has made llamafiles available for only some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain its own library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g., with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux (or `shasum -a 256` on macOS), or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
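+
+    For example (the model filename is illustrative):
+
+    ```bash
+    # Linux (use `shasum -a 256` on macOS)
+    sha256sum llama-3.2-3b-instruct-q4_k_m.gguf
+
+    # Windows
+    certutil -hashfile llama-3.2-3b-instruct-q4_k_m.gguf SHA256
+    ```
+
+    The printed hash must match the one shown by the **Copy SHA256** button on the model's Hugging Face page.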
diff --git a/i18n/ar/data-broker-removals.md b/i18n/ar/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/ar/data-broker-removals.md +++ b/i18n/ar/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php)) diff --git a/i18n/ar/tools.md b/i18n/ar/tools.md index 1bfab5f2..db960922 100644 --- a/i18n/ar/tools.md +++ b/i18n/ar/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/bn-IN/ai-chat.md b/i18n/bn-IN/ai-chat.md new file mode 100644 index 00000000..af64bd7d --- /dev/null +++ b/i18n/bn-IN/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from them. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release and allow you to inspect their training datasets. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information to the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can vary between 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models. 
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+<div class="admonition note" markdown>
+<p class="admonition-title">Compatibility Issues</p>
+
+Kobold.cpp might not run on computers without AVX/AVX2 support.
+
+</div>
+
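+To check AVX2 support before installing, you can query the CPU feature flags. This is a small sketch; command availability varies by platform:
+
+```bash
+# Linux: prints "avx2" if the CPU advertises AVX2; no output means unsupported
+grep -o -m1 'avx2' /proc/cpuinfo
+
+# macOS (Intel): AVX2 appears among the leaf-7 CPU features
+sysctl -n machdep.cpu.leaf7_features | grep -o 'AVX2'
+```
+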
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its efficient, llama.cpp-based inference engine. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
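+A typical first session looks like the following sketch (model names are examples from Ollama's library):
+
+```bash
+# Download Llama 3.2 (if not already present) and start an interactive chat
+ollama run llama3.2
+
+# List the models currently stored on your machine
+ollama list
+```
+
+### Llamafile
+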
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla has made llamafiles available for only some Llama and Mistral models, while there are few third-party llamafiles available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains their own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download it from there. However, if you want to download models not present in their library, or use an AI client that doesn't maintain its library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum. 
diff --git a/i18n/bn-IN/data-broker-removals.md b/i18n/bn-IN/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/bn-IN/data-broker-removals.md +++ b/i18n/bn-IN/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/bn-IN/tools.md b/i18n/bn-IN/tools.md index c3253af8..754a7b61 100644 --- a/i18n/bn-IN/tools.md +++ b/i18n/bn-IN/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/bn/ai-chat.md b/i18n/bn/ai-chat.md new file mode 100644 index 00000000..af64bd7d --- /dev/null +++ b/i18n/bn/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from them. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release and allow you to inspect their training datasets. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information to the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can vary between 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models. 
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+<div class="admonition note" markdown>
+<p class="admonition-title">Compatibility Issues</p>
+
+Kobold.cpp might not run on computers without AVX/AVX2 support.
+
+</div>
+
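+To check AVX2 support before installing, you can query the CPU feature flags. This is a small sketch; command availability varies by platform:
+
+```bash
+# Linux: prints "avx2" if the CPU advertises AVX2; no output means unsupported
+grep -o -m1 'avx2' /proc/cpuinfo
+
+# macOS (Intel): AVX2 appears among the leaf-7 CPU features
+sysctl -n machdep.cpu.leaf7_features | grep -o 'AVX2'
+```
+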
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its efficient, llama.cpp-based inference engine. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
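+A typical first session looks like the following sketch (model names are examples from Ollama's library):
+
+```bash
+# Download Llama 3.2 (if not already present) and start an interactive chat
+ollama run llama3.2
+
+# List the models currently stored on your machine
+ollama list
+```
+
+### Llamafile
+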
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla has made llamafiles available for only some Llama and Mistral models, while there are few third-party llamafiles available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains their own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download it from there. However, if you want to download models not present in their library, or use an AI client that doesn't maintain its library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum. 
diff --git a/i18n/bn/data-broker-removals.md b/i18n/bn/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/bn/data-broker-removals.md +++ b/i18n/bn/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/bn/tools.md b/i18n/bn/tools.md index c3253af8..754a7b61 100644 --- a/i18n/bn/tools.md +++ b/i18n/bn/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/cs/ai-chat.md b/i18n/cs/ai-chat.md new file mode 100644 index 00000000..af64bd7d --- /dev/null +++ b/i18n/cs/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from them. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release and allow you to inspect their training datasets. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information to the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can vary between 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models. 
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+
+Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. A minimal launch example is sketched below.
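+
+The following is only a sketch, not an official invocation: `model.gguf` is a placeholder filename, and the flag names should be checked against `koboldcpp --help` for your build.
+
+```bash
+# Load a local GGUF model, offload 20 layers to the GPU,
+# and expose the web UI through a remote tunnel
+koboldcpp --model model.gguf --contextsize 4096 --gpulayers 20 --remotetunnel
+```
+
+### Ollama (CLI)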
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its efficient inference engine. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model, as sketched below. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
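+
+A typical session might look like this (the subcommands below are part of Ollama's standard CLI; `llama3.2` names a model in its library):
+
+```bash
+ollama pull llama3.2   # fetch the model from Ollama's library
+ollama list            # show models already downloaded
+ollama run llama3.2    # start an interactive chat with the model
+```
+
+### Llamafile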
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla only provides llamafiles for certain Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain a library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the checksum of the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
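+
+    For example (a sketch; `model.gguf` is a placeholder filename, and the expected value comes from the model's download page):
+
+    ```bash
+    # Linux/macOS: print the SHA-256 checksum of the downloaded model file
+    sha256sum model.gguf
+
+    # Windows equivalent using certutil
+    certutil -hashfile model.gguf SHA256
+    ```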
diff --git a/i18n/cs/data-broker-removals.md b/i18n/cs/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/cs/data-broker-removals.md +++ b/i18n/cs/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/cs/tools.md b/i18n/cs/tools.md index c3253af8..754a7b61 100644 --- a/i18n/cs/tools.md +++ b/i18n/cs/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/de/ai-chat.md b/i18n/de/ai-chat.md
new file mode 100644
index 00000000..e8a27f1d
--- /dev/null
+++ b/i18n/de/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                        |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                          |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                          |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available          |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                        |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows   |
+
+### Kobold.cpp
+
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+
+Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. A minimal launch example is sketched below.
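+
+The following is only a sketch, not an official invocation: `model.gguf` is a placeholder filename, and the flag names should be checked against `koboldcpp --help` for your build.
+
+```bash
+# Load a local GGUF model, offload 20 layers to the GPU,
+# and expose the web UI through a remote tunnel
+koboldcpp --model model.gguf --contextsize 4096 --gpulayers 20 --remotetunnel
+```
+
+### Ollama (CLI)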
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its efficient inference engine. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model, as sketched below. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
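+
+A typical session might look like this (the subcommands below are part of Ollama's standard CLI; `llama3.2` names a model in its library):
+
+```bash
+ollama pull llama3.2   # fetch the model from Ollama's library
+ollama list            # show models already downloaded
+ollama run llama3.2    # start an interactive chat with the model
+```
+
+### Llamafile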
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla only provides llamafiles for certain Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain a library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the checksum of the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
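+
+    For example (a sketch; `model.gguf` is a placeholder filename, and the expected value comes from the model's download page):
+
+    ```bash
+    # Linux/macOS: print the SHA-256 checksum of the downloaded model file
+    sha256sum model.gguf
+
+    # Windows equivalent using certutil
+    certutil -hashfile model.gguf SHA256
+    ```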
diff --git a/i18n/de/data-broker-removals.md b/i18n/de/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/de/data-broker-removals.md +++ b/i18n/de/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/de/tools.md b/i18n/de/tools.md index 6dbbc002..7cc11aca 100644 --- a/i18n/de/tools.md +++ b/i18n/de/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/el/ai-chat.md b/i18n/el/ai-chat.md
new file mode 100644
index 00000000..af64bd7d
--- /dev/null
+++ b/i18n/el/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                        |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                          |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                          |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available          |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                        |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows   |
+
+### Kobold.cpp
+
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+
+Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. A minimal launch example is sketched below.
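+
+The following is only a sketch, not an official invocation: `model.gguf` is a placeholder filename, and the flag names should be checked against `koboldcpp --help` for your build.
+
+```bash
+# Load a local GGUF model, offload 20 layers to the GPU,
+# and expose the web UI through a remote tunnel
+koboldcpp --model model.gguf --contextsize 4096 --gpulayers 20 --remotetunnel
+```
+
+### Ollama (CLI)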
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its efficient inference engine. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model, as sketched below. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
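+
+A typical session might look like this (the subcommands below are part of Ollama's standard CLI; `llama3.2` names a model in its library):
+
+```bash
+ollama pull llama3.2   # fetch the model from Ollama's library
+ollama list            # show models already downloaded
+ollama run llama3.2    # start an interactive chat with the model
+```
+
+### Llamafile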
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla only provides llamafiles for certain Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain a library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the checksum of the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
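+
+    For example (a sketch; `model.gguf` is a placeholder filename, and the expected value comes from the model's download page):
+
+    ```bash
+    # Linux/macOS: print the SHA-256 checksum of the downloaded model file
+    sha256sum model.gguf
+
+    # Windows equivalent using certutil
+    certutil -hashfile model.gguf SHA256
+    ```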
diff --git a/i18n/el/data-broker-removals.md b/i18n/el/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/el/data-broker-removals.md +++ b/i18n/el/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/el/tools.md b/i18n/el/tools.md index 3c1d92c9..ba0c7372 100644 --- a/i18n/el/tools.md +++ b/i18n/el/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/eo/ai-chat.md b/i18n/eo/ai-chat.md
new file mode 100644
index 00000000..af64bd7d
--- /dev/null
+++ b/i18n/eo/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                        |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                          |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                          |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available          |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                        |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows   |
+
+### Kobold.cpp
+
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+
+Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. A minimal launch example is sketched below.
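+
+The following is only a sketch, not an official invocation: `model.gguf` is a placeholder filename, and the flag names should be checked against `koboldcpp --help` for your build.
+
+```bash
+# Load a local GGUF model, offload 20 layers to the GPU,
+# and expose the web UI through a remote tunnel
+koboldcpp --model model.gguf --contextsize 4096 --gpulayers 20 --remotetunnel
+```
+
+### Ollama (CLI)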
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its efficient inference engine. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model, as sketched below. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
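+
+A typical session might look like this (the subcommands below are part of Ollama's standard CLI; `llama3.2` names a model in its library):
+
+```bash
+ollama pull llama3.2   # fetch the model from Ollama's library
+ollama list            # show models already downloaded
+ollama run llama3.2    # start an interactive chat with the model
+```
+
+### Llamafile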
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla only provides llamafiles for certain Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain a library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the checksum of the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
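+
+    For example (a sketch; `model.gguf` is a placeholder filename, and the expected value comes from the model's download page):
+
+    ```bash
+    # Linux/macOS: print the SHA-256 checksum of the downloaded model file
+    sha256sum model.gguf
+
+    # Windows equivalent using certutil
+    certutil -hashfile model.gguf SHA256
+    ```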
diff --git a/i18n/eo/data-broker-removals.md b/i18n/eo/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/eo/data-broker-removals.md +++ b/i18n/eo/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/eo/tools.md b/i18n/eo/tools.md index c3253af8..754a7b61 100644 --- a/i18n/eo/tools.md +++ b/i18n/eo/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/es/ai-chat.md b/i18n/es/ai-chat.md
new file mode 100644
index 00000000..66fa5b7a
--- /dev/null
+++ b/i18n/es/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multiplataforma | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads "Descargas" + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+ +Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone, as sketched below.
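+
+As a rough launch sketch (the model file name is illustrative, and flag names can change between releases, so verify them against the project wiki):
+
+```bash
+# Start Kobold.cpp with a local GGUF model and serve its web UI on port 5001
+python koboldcpp.py --model ./mistral-7b-instruct.Q4_K_M.gguf --port 5001
+
+# Expose the UI to other devices, such as your phone, through a remote tunnel
+python koboldcpp.py --model ./mistral-7b-instruct.Q4_K_M.gguf --remotetunnel
+```
+
+### Ollama (CLI)
+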
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its efficient inference and other optimization techniques. It also requires no manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } +
+Downloads "Descargas" + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
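+
+A minimal usage sketch (the model tag is just an example; any model from the library works the same way):
+
+```bash
+# Download the model on first use, then start an interactive chat session
+ollama run llama3.2
+
+# Fetch a model from the library without starting a chat
+ollama pull llama3.2
+
+# List the models stored locally
+ollama list
+```
+
+### Llamafile
+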
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads "Descargas" + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla only provides llamafiles for some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain a model library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criterios + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### Requisitos Mínimos + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Mejor Caso + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Es posible que nuestras recomendaciones no incluyan todas o algunas de estas funciones, pero las que sí las incluyan pueden estar mejor clasificadas que otras en esta página. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
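+
+As a concrete sketch of the checksum comparison described above (the model file name is illustrative):
+
+```bash
+# Linux/macOS: compute the SHA-256 checksum of the downloaded model file
+sha256sum ./mistral-7b-instruct.Q4_K_M.gguf
+
+# Windows: the equivalent built-in command
+certutil -hashfile mistral-7b-instruct.Q4_K_M.gguf SHA256
+
+# Compare the output with the value behind the "Copy SHA256" button on the
+# model's Hugging Face page; a mismatch means the file was corrupted or tampered with.
+```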
diff --git a/i18n/es/data-broker-removals.md b/i18n/es/data-broker-removals.md index 7352596b..ede1c7a4 100644 --- a/i18n/es/data-broker-removals.md +++ b/i18n/es/data-broker-removals.md @@ -86,8 +86,6 @@ Nuestras pruebas indican que EasyOptOuts ofrece la mejor relación calidad-preci EasyOptOuts no cubre los siguientes sitios que consideramos de "alta prioridad", por lo que deberías excluirte manualmente: - Intelius ([Buscar](https://intelius.com), [Darse de baja](https://suppression.peopleconnect.us/login)) -- PeekYou ([Buscar](https://peekyou.com), [Darse de baja](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Buscar](https://publicdatausa.com), [Darse de baja](https://publicdatausa.com/remove.php))
diff --git a/i18n/es/tools.md b/i18n/es/tools.md index 5ffe5d06..9faab1e8 100644 --- a/i18n/es/tools.md +++ b/i18n/es/tools.md @@ -359,6 +359,18 @@ Si busca más **seguridad**, asegúrese siempre de conectarse a sitios web que u ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Más información :material-arrow-right-drop-circle:](ai-chat.md) + ### Sincronización del Calendario
diff --git a/i18n/fa/ai-chat.md b/i18n/fa/ai-chat.md new file mode 100644 index 00000000..af64bd7d --- /dev/null +++ b/i18n/fa/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from those providers. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+ +Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone, as sketched below.
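+
+As a rough launch sketch (the model file name is illustrative, and flag names can change between releases, so verify them against the project wiki):
+
+```bash
+# Start Kobold.cpp with a local GGUF model and serve its web UI on port 5001
+python koboldcpp.py --model ./mistral-7b-instruct.Q4_K_M.gguf --port 5001
+
+# Expose the UI to other devices, such as your phone, through a remote tunnel
+python koboldcpp.py --model ./mistral-7b-instruct.Q4_K_M.gguf --remotetunnel
+```
+
+### Ollama (CLI)
+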
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its efficient inference and other optimization techniques. It also requires no manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
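+
+A minimal usage sketch (the model tag is just an example; any model from the library works the same way):
+
+```bash
+# Download the model on first use, then start an interactive chat session
+ollama run llama3.2
+
+# Fetch a model from the library without starting a chat
+ollama pull llama3.2
+
+# List the models stored locally
+ollama list
+```
+
+### Llamafile
+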
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla only provides llamafiles for some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain a model library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
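+
+As a concrete sketch of the checksum comparison described above (the model file name is illustrative):
+
+```bash
+# Linux/macOS: compute the SHA-256 checksum of the downloaded model file
+sha256sum ./mistral-7b-instruct.Q4_K_M.gguf
+
+# Windows: the equivalent built-in command
+certutil -hashfile mistral-7b-instruct.Q4_K_M.gguf SHA256
+
+# Compare the output with the value behind the "Copy SHA256" button on the
+# model's Hugging Face page; a mismatch means the file was corrupted or tampered with.
+```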
diff --git a/i18n/fa/data-broker-removals.md b/i18n/fa/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/fa/data-broker-removals.md +++ b/i18n/fa/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/fa/tools.md b/i18n/fa/tools.md index 332f585d..6a19212a 100644 --- a/i18n/fa/tools.md +++ b/i18n/fa/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/fr/ai-chat.md b/i18n/fr/ai-chat.md new file mode 100644 index 00000000..52bbe386 --- /dev/null +++ b/i18n/fr/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from those providers. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-plateforme | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Téléchargements + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+ +Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone, as sketched below.
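+
+As a rough launch sketch (the model file name is illustrative, and flag names can change between releases, so verify them against the project wiki):
+
+```bash
+# Start Kobold.cpp with a local GGUF model and serve its web UI on port 5001
+python koboldcpp.py --model ./mistral-7b-instruct.Q4_K_M.gguf --port 5001
+
+# Expose the UI to other devices, such as your phone, through a remote tunnel
+python koboldcpp.py --model ./mistral-7b-instruct.Q4_K_M.gguf --remotetunnel
+```
+
+### Ollama (CLI)
+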
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its efficient inference and other optimization techniques. It also requires no manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } +
+Téléchargements + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
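+
+A minimal usage sketch (the model tag is just an example; any model from the library works the same way):
+
+```bash
+# Download the model on first use, then start an interactive chat session
+ollama run llama3.2
+
+# Fetch a model from the library without starting a chat
+ollama pull llama3.2
+
+# List the models stored locally
+ollama list
+```
+
+### Llamafile
+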
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Téléchargements + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla only provides llamafiles for some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain a model library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Critères + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### Exigences minimales + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Dans le meilleur des cas + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Nos recommandations peuvent ne pas inclure tout ou partie de cette fonctionnalité, mais celles qui l'incluent peuvent être mieux classées que les autres sur cette page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
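+
+As a concrete sketch of the checksum comparison described above (the model file name is illustrative):
+
+```bash
+# Linux/macOS: compute the SHA-256 checksum of the downloaded model file
+sha256sum ./mistral-7b-instruct.Q4_K_M.gguf
+
+# Windows: the equivalent built-in command
+certutil -hashfile mistral-7b-instruct.Q4_K_M.gguf SHA256
+
+# Compare the output with the value behind the "Copy SHA256" button on the
+# model's Hugging Face page; a mismatch means the file was corrupted or tampered with.
+```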
diff --git a/i18n/fr/data-broker-removals.md b/i18n/fr/data-broker-removals.md index c7894e8f..3f9888b9 100644 --- a/i18n/fr/data-broker-removals.md +++ b/i18n/fr/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/fr/tools.md b/i18n/fr/tools.md index 03f74367..6ed03c5a 100644 --- a/i18n/fr/tools.md +++ b/i18n/fr/tools.md @@ -358,6 +358,18 @@ Nous [recommandons](dns.md#recommended-providers) un certain nombre de serveurs ## Logiciels +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[En savoir plus :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendriers synchronisés
diff --git a/i18n/he/ai-chat.md b/i18n/he/ai-chat.md new file mode 100644 index 00000000..6f57085e --- /dev/null +++ b/i18n/he/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from those providers. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| מרובה-פלטפורמות | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+ +Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone, as sketched below.
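+
+As a rough launch sketch (the model file name is illustrative, and flag names can change between releases, so verify them against the project wiki):
+
+```bash
+# Start Kobold.cpp with a local GGUF model and serve its web UI on port 5001
+python koboldcpp.py --model ./mistral-7b-instruct.Q4_K_M.gguf --port 5001
+
+# Expose the UI to other devices, such as your phone, through a remote tunnel
+python koboldcpp.py --model ./mistral-7b-instruct.Q4_K_M.gguf --remotetunnel
+```
+
+### Ollama (CLI)
+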
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its efficient inference and other optimization techniques. It also requires no manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
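+
+A minimal usage sketch (the model tag is just an example; any model from the library works the same way):
+
+```bash
+# Download the model on first use, then start an interactive chat session
+ollama run llama3.2
+
+# Fetch a model from the library without starting a chat
+ollama pull llama3.2
+
+# List the models stored locally
+ollama list
+```
+
+### Llamafile
+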
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla only provides llamafiles for some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain a model library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## קריטריונים + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### דרישות מינימליות + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### המקרה הטוב ביותר + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. ייתכן שההמלצות שלנו לא יכללו חלק מהפונקציונליות הזו או את כולה, אך אלו שכן כן עשויות לדרג גבוה יותר מאחרות בדף זה. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
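+
+As a concrete sketch of the checksum comparison described above (the model file name is illustrative):
+
+```bash
+# Linux/macOS: compute the SHA-256 checksum of the downloaded model file
+sha256sum ./mistral-7b-instruct.Q4_K_M.gguf
+
+# Windows: the equivalent built-in command
+certutil -hashfile mistral-7b-instruct.Q4_K_M.gguf SHA256
+
+# Compare the output with the value behind the "Copy SHA256" button on the
+# model's Hugging Face page; a mismatch means the file was corrupted or tampered with.
+```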
diff --git a/i18n/he/data-broker-removals.md b/i18n/he/data-broker-removals.md index 1e332b38..88f15445 100644 --- a/i18n/he/data-broker-removals.md +++ b/i18n/he/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/he/tools.md b/i18n/he/tools.md
index 70dc01b5..6bf10698 100644
--- a/i18n/he/tools.md
+++ b/i18n/he/tools.md
@@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b
 ## Software
+### AI Chat
+
+<div class="grid cards" markdown>
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+
+[Learn more :material-arrow-right-drop-circle:](ai-chat.md)
+
 ### Calendar Sync
diff --git a/i18n/hi/ai-chat.md b/i18n/hi/ai-chat.md
new file mode 100644
index 00000000..af64bd7d
--- /dev/null
+++ b/i18n/hi/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can vary from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+<div class="admonition warning" markdown>
+<p class="admonition-title">Compatibility Issues</p>
+
+Kobold.cpp might not run on computers without AVX/AVX2 support.
+
+</div>
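+
+As a rough sketch of a typical launch from a terminal, including the network-tunnel option described in the next paragraph (the model file name is a placeholder, and the flag names follow Kobold.cpp's documentation, so check `--help` for your build):
+
+```bash
+# Load a local GGUF model; Kobold.cpp then serves its web UI on localhost
+./koboldcpp --model ./model.Q4_K_M.gguf
+
+# Optionally add a network tunnel to reach the UI from another device
+./koboldcpp --model ./model.Q4_K_M.gguf --remotetunnel
+```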
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its use of inference optimizations and other techniques. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+<details class="downloads" markdown>
+<summary>Downloads</summary>
+
+- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows)
+- [:simple-apple: macOS](https://ollama.com/download/mac)
+- [:simple-linux: Linux](https://ollama.com/download/linux)
+
+</details>
+
+</div>
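+
+As a minimal first-run sketch of the automatic downloads described in the next paragraph (any model tag from Ollama's library can replace `llama3.2`):
+
+```bash
+# Fetch a model from Ollama's library without starting a chat
+ollama pull llama3.2
+
+# Start an interactive chat; the model is pulled first if it is missing
+ollama run llama3.2
+```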
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
+### Llamafile
+
+<div class="admonition recommendation" markdown>
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
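+
+The paragraph below describes llamafile's model availability and Windows size limits; as a hedged sketch of the external-weights workaround it links to (file names are placeholders, and the `-m` flag follows llamafile's documentation):
+
+```bash
+# Run a self-contained llamafile directly (weights are embedded)
+./llava-v1.5-7b-q4.llamafile
+
+# Or keep the small llamafile runner separate from a GGUF weights file,
+# which avoids bundling everything into one Windows executable over 4GB
+./llamafile -m mistral-7b-instruct.Q4_K_M.gguf
+```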
+
+Mozilla has made llamafiles available for only some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in that library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
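+
+Tying together the download-and-verify guidance above, a sketch of fetching a single model file from Hugging Face via the CLI (the repository and file names are placeholders; `huggingface-cli` is installed with the `huggingface_hub` Python package):
+
+```bash
+# Download one GGUF file from a Hugging Face repository
+huggingface-cli download bartowski/Llama-3.2-3B-Instruct-GGUF Llama-3.2-3B-Instruct-Q4_K_M.gguf
+
+# Verify its checksum before loading it into your AI client
+sha256sum Llama-3.2-3B-Instruct-Q4_K_M.gguf
+```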
diff --git a/i18n/hi/data-broker-removals.md b/i18n/hi/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/hi/data-broker-removals.md +++ b/i18n/hi/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/hi/tools.md b/i18n/hi/tools.md index c3253af8..754a7b61 100644 --- a/i18n/hi/tools.md +++ b/i18n/hi/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/hu/ai-chat.md b/i18n/hu/ai-chat.md
new file mode 100644
index 00000000..54873142
--- /dev/null
+++ b/i18n/hu/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can vary from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+<div class="admonition warning" markdown>
+<p class="admonition-title">Compatibility Issues</p>
+
+Kobold.cpp might not run on computers without AVX/AVX2 support.
+
+</div>
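+
+If you are unsure whether your processor supports these instructions, you can check from a terminal (Linux sketch; other platforms expose this information differently):
+
+```bash
+# Prints "avx2" if the CPU advertises AVX2 support; no output means it is absent
+grep -o -m1 'avx2' /proc/cpuinfo
+```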
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its use of inference optimizations and other techniques. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+<details class="downloads" markdown>
+<summary>Downloads</summary>
+
+- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows)
+- [:simple-apple: macOS](https://ollama.com/download/mac)
+- [:simple-linux: Linux](https://ollama.com/download/linux)
+
+</details>
+
+</div>
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
+### Llamafile
+
+<div class="admonition recommendation" markdown>
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
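+
+The size limits discussed below are easy to estimate: a model file is roughly its parameter count times the bits stored per weight, divided by 8. Assuming roughly 4.5 bits per weight for a typical Q4 quantization (an approximation that ignores metadata overhead):
+
+```latex
+\text{size} \approx \frac{n_{\text{params}} \times b_{\text{weight}}}{8}
+\qquad 7\text{B}: \frac{7 \times 10^9 \times 4.5}{8} \approx 3.9\,\text{GB}
+\qquad 13\text{B}: \frac{13 \times 10^9 \times 4.5}{8} \approx 7.3\,\text{GB}
+```
+
+This is why only the smallest quantized models squeeze under the 4GB `.exe` limit mentioned below.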
+
+Mozilla has made llamafiles available for only some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in that library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
diff --git a/i18n/hu/data-broker-removals.md b/i18n/hu/data-broker-removals.md index fb7baa9a..89552b1b 100644 --- a/i18n/hu/data-broker-removals.md +++ b/i18n/hu/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/hu/tools.md b/i18n/hu/tools.md
index df7f950d..a2a851e4 100644
--- a/i18n/hu/tools.md
+++ b/i18n/hu/tools.md
@@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b
 ## Software
+### AI Chat
+
+<div class="grid cards" markdown>
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+
+[Learn more :material-arrow-right-drop-circle:](ai-chat.md)
+
 ### Calendar Sync
diff --git a/i18n/id/ai-chat.md b/i18n/id/ai-chat.md
new file mode 100644
index 00000000..ce7d8bdb
--- /dev/null
+++ b/i18n/id/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can vary from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+<div class="admonition warning" markdown>
+<p class="admonition-title">Compatibility Issues</p>
+
+Kobold.cpp might not run on computers without AVX/AVX2 support.
+
+</div>
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its use of inference optimizations and other techniques. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+<details class="downloads" markdown>
+<summary>Downloads</summary>
+
+- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows)
+- [:simple-apple: macOS](https://ollama.com/download/mac)
+- [:simple-linux: Linux](https://ollama.com/download/linux)
+
+</details>
+
+</div>
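+
+Beyond the interactive CLI, Ollama exposes a local HTTP API on port 11434 that other apps on your device can call; a minimal sketch (the model must already be downloaded, as the next paragraph explains):
+
+```bash
+curl http://localhost:11434/api/generate -d '{
+  "model": "llama3.2",
+  "prompt": "Why is the sky blue?"
+}'
+```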
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
+### Llamafile
+
+<div class="admonition recommendation" markdown>
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla has made llamafiles available for only some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in that library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
diff --git a/i18n/id/data-broker-removals.md b/i18n/id/data-broker-removals.md index 3bd4524c..c364b427 100644 --- a/i18n/id/data-broker-removals.md +++ b/i18n/id/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/id/tools.md b/i18n/id/tools.md
index a18d4413..6c36d06a 100644
--- a/i18n/id/tools.md
+++ b/i18n/id/tools.md
@@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b
 ## Software
+### AI Chat
+
+<div class="grid cards" markdown>
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+
+[Learn more :material-arrow-right-drop-circle:](ai-chat.md)
+
 ### Calendar Sync
diff --git a/i18n/it/ai-chat.md b/i18n/it/ai-chat.md
new file mode 100644
index 00000000..46ccbbf0
--- /dev/null
+++ b/i18n/it/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can vary from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                        |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                          |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                          |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available         |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                        |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows  |
+
+### Kobold.cpp
+
+<div class="admonition recommendation" markdown>
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads
+
+- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases)
+- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases)
+- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases)
+
+ +
+ +
+<div class="admonition warning" markdown>
+<p class="admonition-title">Compatibility Issues</p>
+
+Kobold.cpp might not run on computers without AVX/AVX2 support.
+
+</div>
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its use of inference optimizations and other techniques. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+<details class="downloads" markdown>
+<summary>Downloads</summary>
+
+- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows)
+- [:simple-apple: macOS](https://ollama.com/download/mac)
+- [:simple-linux: Linux](https://ollama.com/download/linux)
+
+</details>
+
+</div>
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
+### Llamafile
+
+<div class="admonition recommendation" markdown>
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + - [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + 
+ +
+ +Mozilla only provides llamafiles for some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To work around these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models that are not present in that library, or if you use an AI client that doesn't maintain its own library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the checksum of the file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks; a worked checksum example is included at the end of this page. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
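As a worked example of the checksum verification described under Securely Downloading Models above (the file name and expected hash are placeholders, not real values):

```bash
# Sketch: print the SHA256 checksum of a downloaded model file, then compare it
# with the value from the "Copy SHA256" button on Hugging Face.
sha256sum ./mistral-7b-instruct.Q4_K_M.gguf

# Alternatively, let sha256sum do the comparison. The expected hash must be the
# full 64-character value, followed by two spaces and the file name:
echo "<expected-sha256>  ./mistral-7b-instruct.Q4_K_M.gguf" | sha256sum --check
```

On Windows, `certutil -hashfile .\mistral-7b-instruct.Q4_K_M.gguf SHA256` prints the same information. If the values do not match, delete the file and download it again from the official source.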
diff --git a/i18n/it/data-broker-removals.md b/i18n/it/data-broker-removals.md index 9af2f27b..71108635 100644 --- a/i18n/it/data-broker-removals.md +++ b/i18n/it/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/it/tools.md b/i18n/it/tools.md index 38607f8d..8bfa2a6c 100644 --- a/i18n/it/tools.md +++ b/i18n/it/tools.md @@ -358,6 +358,18 @@ Se stai cercando maggiore **sicurezza**, dovresti sempre assicurarti di connette ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Sincronizzazione Calendario
diff --git a/i18n/ja/ai-chat.md b/i18n/ja/ai-chat.md new file mode 100644 index 00000000..bb201c5d --- /dev/null +++ b/i18n/ja/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai), which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which ranges from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B offer a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| -------------------------- | ----------- | ------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+ +Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + 
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its use of inference optimizations and other performance techniques. It also requires little to no manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } + 
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity. A short usage sketch follows below.
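A short sketch of the workflow described above; `llama3.2` is the model name from the example, and any model from Ollama's library works the same way:

```bash
# Download a model from Ollama's library without starting a chat:
ollama pull llama3.2

# Start an interactive chat session (this also downloads the model if needed):
ollama run llama3.2

# List the models already present on your machine:
ollama list
```

### Llamafile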
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla only provides llamafiles for some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To work around these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models that are not present in that library, or if you use an AI client that doesn't maintain its own library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use; a download sketch using the official CLI is included at the end of this page. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the checksum of the file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
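As a sketch of the Hugging Face download flow recommended above, the official `huggingface-cli` tool (part of the `huggingface_hub` Python package) can fetch a single model file from a repository. The repository and file names below are placeholders to adapt:

```bash
# Install the official Hugging Face CLI:
pip install huggingface_hub

# Download one GGUF file from a model repository into ./models:
huggingface-cli download \
  <username>/<model-repo> \
  <model-file>.Q4_K_M.gguf \
  --local-dir ./models
```

Afterwards, compare the downloaded file's checksum against the value shown on the model page, as described in the checklist above.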
diff --git a/i18n/ja/data-broker-removals.md b/i18n/ja/data-broker-removals.md index 03175ecd..0a8a8886 100644 --- a/i18n/ja/data-broker-removals.md +++ b/i18n/ja/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/ja/tools.md b/i18n/ja/tools.md index 158c312e..1967e1d5 100644 --- a/i18n/ja/tools.md +++ b/i18n/ja/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## ソフトウェア +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### カレンダー同期
diff --git a/i18n/ko/ai-chat.md b/i18n/ko/ai-chat.md new file mode 100644 index 00000000..08c1d7a6 --- /dev/null +++ b/i18n/ko/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai), which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which ranges from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B offer a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+ +Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + 
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its use of inference optimizations and other performance techniques. It also requires little to no manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } + 
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity. + +### Llamafile + 
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla only provides llamafiles for some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To work around these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights); a minimal launch sketch is included at the end of this page. + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models that are not present in that library, or if you use an AI client that doesn't maintain its own library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the checksum of the file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
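As a minimal sketch of the external-weights workaround mentioned above (file names are placeholders; `-m` is the weights-loading flag described in the llamafile quickstart):

```bash
# Make the runtime executable first (Linux/macOS):
chmod +x ./llamafile

# Launch it with model weights kept in a separate GGUF file, so the executable
# itself stays below Windows' 4GB .exe size limit:
./llamafile -m ./mistral-7b-instruct.Q4_K_M.gguf
```

By default this typically starts a local web interface that you can open in your browser.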
diff --git a/i18n/ko/data-broker-removals.md b/i18n/ko/data-broker-removals.md index 66ef1b38..6a365b8e 100644 --- a/i18n/ko/data-broker-removals.md +++ b/i18n/ko/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/ko/tools.md b/i18n/ko/tools.md index 8fdc3a90..f476a42d 100644 --- a/i18n/ko/tools.md +++ b/i18n/ko/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## 소프트웨어 +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### 캘린더 동기화
diff --git a/i18n/ku-IQ/ai-chat.md b/i18n/ku-IQ/ai-chat.md new file mode 100644 index 00000000..af64bd7d --- /dev/null +++ b/i18n/ku-IQ/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai), which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which ranges from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B offer a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models. A back-of-the-envelope memory estimate is also included at the end of this page.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+ +Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + 
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its use of inference optimizations and other performance techniques. It also requires little to no manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } + 
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity. + +### Llamafile + 
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla only provides llamafiles for some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To work around these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models that are not present in that library, or if you use an AI client that doesn't maintain its own library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the checksum of the file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
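As a back-of-the-envelope check on the RAM figures in the hardware table earlier on this page (the ~20% overhead factor for the context window and runtime buffers is a rough assumption, not a measured value):

$$
\text{memory} \approx N_{\text{parameters}} \times \frac{\text{bits per weight}}{8} \times 1.2
$$

For a 7B-parameter model quantized to roughly 4 bits per weight, this gives about \(7 \times 10^9 \times 0.5\,\text{bytes} \times 1.2 \approx 4.2\) GB, which is consistent with the table's 8GB minimum RAM once the operating system and other applications are accounted for.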
diff --git a/i18n/ku-IQ/data-broker-removals.md b/i18n/ku-IQ/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/ku-IQ/data-broker-removals.md +++ b/i18n/ku-IQ/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/ku-IQ/tools.md b/i18n/ku-IQ/tools.md index 849a25b4..11cae742 100644 --- a/i18n/ku-IQ/tools.md +++ b/i18n/ku-IQ/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/nl/ai-chat.md b/i18n/nl/ai-chat.md new file mode 100644 index 00000000..57e083d1 --- /dev/null +++ b/i18n/nl/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word based on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai), which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which ranges from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B offer a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+
+![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right}
+
+Kobold.cpp is an AI client that runs locally on your Windows, macOS, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes.
+
+In addition to supporting a wide range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp).
+
+[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+
+Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices, such as your phone.
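+
+As a rough sketch, launching Kobold.cpp from a terminal with a custom port and a remote tunnel might look like the following. The `model.gguf` filename is a placeholder, and flag names can change between releases, so verify them against the output of `--help` before relying on them.
+
+```sh
+# Start Kobold.cpp with a local GGUF model (placeholder filename).
+# --port sets the local web UI port; --remotetunnel exposes it to
+# other devices. These flags are assumptions; confirm with --help.
+./koboldcpp --model ./model.gguf --port 5001 --remotetunnel
+```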
+
+### Ollama (CLI)
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its optimized, llama.cpp-based inference. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
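+
+To make the workflow concrete, a minimal terminal session might look like the following. The `llama3.2` tag comes from the example above; any other tag from the model library works the same way.
+
+```sh
+# Download a model from Ollama's library without starting a chat
+ollama pull llama3.2
+
+# List the models available locally
+ollama list
+
+# Start an interactive chat (downloads the model if it is missing)
+ollama run llama3.2
+```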
+
+### Llamafile
+
+![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right}
+
+Llamafile is a lightweight, single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows.
+
+Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation.
+
+[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla only provides llamafiles for some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models that are not present in the library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g., with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
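+
+As a quick illustration of the checksum verification described in the footnote above, the commands below hash a downloaded model file; `model.gguf` is a placeholder name. Compare the output against the SHA256 value copied from the model's Hugging Face page.
+
+```sh
+# Linux and macOS: print the SHA256 checksum of the downloaded file
+sha256sum model.gguf
+
+# Windows: the same, using the built-in certutil tool
+certutil -hashfile model.gguf SHA256
+```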
diff --git a/i18n/nl/data-broker-removals.md b/i18n/nl/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/nl/data-broker-removals.md +++ b/i18n/nl/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/nl/tools.md b/i18n/nl/tools.md index c16363b9..800599a6 100644 --- a/i18n/nl/tools.md +++ b/i18n/nl/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+
+[Learn more :material-arrow-right-drop-circle:](ai-chat.md)
+
 ### Calendar Sync
diff --git a/i18n/pl/ai-chat.md b/i18n/pl/ai-chat.md
new file mode 100644
index 00000000..03a8e5b5
--- /dev/null
+++ b/i18n/pl/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which typically ranges from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B offer a great compromise between quality and speed. Models with advanced reasoning capabilities generally have around 70B parameters.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                       |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available         |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                       |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows  |
+
+### Kobold.cpp
+
+![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right}
+
+Kobold.cpp is an AI client that runs locally on your Windows, macOS, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes.
+
+In addition to supporting a wide range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp).
+
+[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+
+Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices, such as your phone.
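+
+As a rough sketch, launching Kobold.cpp from a terminal with a custom port and a remote tunnel might look like the following. The `model.gguf` filename is a placeholder, and flag names can change between releases, so verify them against the output of `--help` before relying on them.
+
+```sh
+# Start Kobold.cpp with a local GGUF model (placeholder filename).
+# --port sets the local web UI port; --remotetunnel exposes it to
+# other devices. These flags are assumptions; confirm with --help.
+./koboldcpp --model ./model.gguf --port 5001 --remotetunnel
+```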
+
+### Ollama (CLI)
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its optimized, llama.cpp-based inference. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
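+
+To make the workflow concrete, a minimal terminal session might look like the following. The `llama3.2` tag comes from the example above; any other tag from the model library works the same way.
+
+```sh
+# Download a model from Ollama's library without starting a chat
+ollama pull llama3.2
+
+# List the models available locally
+ollama list
+
+# Start an interactive chat (downloads the model if it is missing)
+ollama run llama3.2
+```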
+
+### Llamafile
+
+![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right}
+
+Llamafile is a lightweight, single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows.
+
+Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation.
+
+[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla only provides llamafiles for some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models that are not present in the library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g., with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
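+
+As a quick illustration of the checksum verification described in the footnote above, the commands below hash a downloaded model file; `model.gguf` is a placeholder name. Compare the output against the SHA256 value copied from the model's Hugging Face page.
+
+```sh
+# Linux and macOS: print the SHA256 checksum of the downloaded file
+sha256sum model.gguf
+
+# Windows: the same, using the built-in certutil tool
+certutil -hashfile model.gguf SHA256
+```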
diff --git a/i18n/pl/data-broker-removals.md b/i18n/pl/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/pl/data-broker-removals.md +++ b/i18n/pl/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/pl/tools.md b/i18n/pl/tools.md index 0f0c9348..26165e11 100644 --- a/i18n/pl/tools.md +++ b/i18n/pl/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+
+[Learn more :hero-arrow-circle-right-fill:](ai-chat.md)
+
 ### Calendar Sync
diff --git a/i18n/pt-BR/ai-chat.md b/i18n/pt-BR/ai-chat.md
new file mode 100644
index 00000000..cb3164df
--- /dev/null
+++ b/i18n/pt-BR/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which typically ranges from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B offer a great compromise between quality and speed. Models with advanced reasoning capabilities generally have around 70B parameters.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                       |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available         |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                       |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows  |
+
+### Kobold.cpp
+
+![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right}
+
+Kobold.cpp is an AI client that runs locally on your Windows, macOS, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes.
+
+In addition to supporting a wide range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp).
+
+[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+
+Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices, such as your phone.
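+
+As a rough sketch, launching Kobold.cpp from a terminal with a custom port and a remote tunnel might look like the following. The `model.gguf` filename is a placeholder, and flag names can change between releases, so verify them against the output of `--help` before relying on them.
+
+```sh
+# Start Kobold.cpp with a local GGUF model (placeholder filename).
+# --port sets the local web UI port; --remotetunnel exposes it to
+# other devices. These flags are assumptions; confirm with --help.
+./koboldcpp --model ./model.gguf --port 5001 --remotetunnel
+```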
+
+### Ollama (CLI)
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its optimized, llama.cpp-based inference. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
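+
+To make the workflow concrete, a minimal terminal session might look like the following. The `llama3.2` tag comes from the example above; any other tag from the model library works the same way.
+
+```sh
+# Download a model from Ollama's library without starting a chat
+ollama pull llama3.2
+
+# List the models available locally
+ollama list
+
+# Start an interactive chat (downloads the model if it is missing)
+ollama run llama3.2
+```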
+
+### Llamafile
+
+![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right}
+
+Llamafile is a lightweight, single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows.
+
+Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation.
+
+[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla only provides llamafiles for some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models that are not present in the library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g., with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
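+
+As a quick illustration of the checksum verification described in the footnote above, the commands below hash a downloaded model file; `model.gguf` is a placeholder name. Compare the output against the SHA256 value copied from the model's Hugging Face page.
+
+```sh
+# Linux and macOS: print the SHA256 checksum of the downloaded file
+sha256sum model.gguf
+
+# Windows: the same, using the built-in certutil tool
+certutil -hashfile model.gguf SHA256
+```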
diff --git a/i18n/pt-BR/data-broker-removals.md b/i18n/pt-BR/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/pt-BR/data-broker-removals.md +++ b/i18n/pt-BR/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/pt-BR/tools.md b/i18n/pt-BR/tools.md
index 94df08e5..b99645ac 100644
--- a/i18n/pt-BR/tools.md
+++ b/i18n/pt-BR/tools.md
@@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b
 
 ## Software
 
+### AI Chat
+
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+
+[Learn more :material-arrow-right-drop-circle:](ai-chat.md)
+
 ### Calendar Sync
diff --git a/i18n/pt/ai-chat.md b/i18n/pt/ai-chat.md
new file mode 100644
index 00000000..670fc24f
--- /dev/null
+++ b/i18n/pt/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which typically ranges from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B offer a great compromise between quality and speed. Models with advanced reasoning capabilities generally have around 70B parameters.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                       |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available         |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                       |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows  |
+
+### Kobold.cpp
+
+![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right}
+
+Kobold.cpp is an AI client that runs locally on your Windows, macOS, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes.
+
+In addition to supporting a wide range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp).
+
+[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+
+Kobold.cpp allows you to modify parameters such as the AI model's temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices, such as your phone.
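+
+As a rough sketch, launching Kobold.cpp from a terminal with a custom port and a remote tunnel might look like the following. The `model.gguf` filename is a placeholder, and flag names can change between releases, so verify them against the output of `--help` before relying on them.
+
+```sh
+# Start Kobold.cpp with a local GGUF model (placeholder filename).
+# --port sets the local web UI port; --remotetunnel exposes it to
+# other devices. These flags are assumptions; confirm with --help.
+./koboldcpp --model ./model.gguf --port 5001 --remotetunnel
+```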
+
+### Ollama (CLI)
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast thanks to its optimized, llama.cpp-based inference. It also doesn't involve any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
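+
+To make the workflow concrete, a minimal terminal session might look like the following. The `llama3.2` tag comes from the example above; any other tag from the model library works the same way.
+
+```sh
+# Download a model from Ollama's library without starting a chat
+ollama pull llama3.2
+
+# List the models available locally
+ollama list
+
+# Start an interactive chat (downloads the model if it is missing)
+ollama run llama3.2
+```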
+
+### Llamafile
+
+![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right}
+
+Llamafile is a lightweight, single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows.
+
+Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation.
+
+[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" }
+
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla only provides llamafiles for some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models that are not present in the library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of the model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project, and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must have support for GPU-powered fast inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g., with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows, to generate the downloaded file's checksum.
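+
+As a quick illustration of the checksum verification described in the footnote above, the commands below hash a downloaded model file; `model.gguf` is a placeholder name. Compare the output against the SHA256 value copied from the model's Hugging Face page.
+
+```sh
+# Linux and macOS: print the SHA256 checksum of the downloaded file
+sha256sum model.gguf
+
+# Windows: the same, using the built-in certutil tool
+certutil -hashfile model.gguf SHA256
+```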
diff --git a/i18n/pt/data-broker-removals.md b/i18n/pt/data-broker-removals.md index 1e235385..e154c15a 100644 --- a/i18n/pt/data-broker-removals.md +++ b/i18n/pt/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/pt/tools.md b/i18n/pt/tools.md index cf172ace..3fbbe207 100644 --- a/i18n/pt/tools.md +++ b/i18n/pt/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+
+[Learn more :material-arrow-right-drop-circle:](ai-chat.md)
+
 ### Calendar Sync
diff --git a/i18n/ru/ai-chat.md b/i18n/ru/ai-chat.md
new file mode 100644
index 00000000..92ee1025
--- /dev/null
+++ b/i18n/ru/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which typically ranges from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B offer a great compromise between quality and speed. Models with advanced reasoning capabilities generally have around 70B parameters.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| -------------------------- | ----------- | ------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
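If you want to confirm AVX2 support before downloading anything, you can query your CPU's feature flags directly. Below is a minimal sketch for Linux and macOS; the `avx2` flag name is reported by the operating system rather than by Kobold.cpp, and Apple Silicon Macs use a different instruction set entirely, so this check only applies to x86 machines. Windows users can check with a hardware-information tool such as CPU-Z.

```bash
# Linux: prints "avx2" once if the CPU advertises AVX2 support, nothing otherwise
grep -o -m1 'avx2' /proc/cpuinfo

# macOS (Intel): AVX2 appears in the CPU's leaf 7 feature list
sysctl -a | grep -i 'avx2'
```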
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its inference optimizations. It also doesn't involve any manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by downloading the AI model you want to use with a single command. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity. + +### Llamafile + +
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla has made llamafiles available for only some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain its own library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux (or `shasum -a 256` on macOS), or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
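To make the checksum comparison described above concrete, here is a minimal sketch using the commands from the footnote. The file name `model.gguf` is a placeholder for whatever you actually downloaded, and the reference hash is the one copied from Hugging Face's **Copy SHA256** button:

```bash
# Linux: print the SHA-256 checksum of the downloaded model file
sha256sum model.gguf

# macOS: shasum ships with the operating system
shasum -a 256 model.gguf

# Windows (Command Prompt)
certutil -hashfile model.gguf SHA256
```

If the printed hash differs from the published one, the file may have been corrupted or tampered with in transit and should not be used.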
diff --git a/i18n/ru/data-broker-removals.md b/i18n/ru/data-broker-removals.md index 8c4c4bfa..639d0cb5 100644 --- a/i18n/ru/data-broker-removals.md +++ b/i18n/ru/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/ru/tools.md b/i18n/ru/tools.md index 3e48dcc6..48d2f26e 100644 --- a/i18n/ru/tools.md +++ b/i18n/ru/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Программное обеспечение +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/sv/ai-chat.md b/i18n/sv/ai-chat.md new file mode 100644 index 00000000..03052c52 --- /dev/null +++ b/i18n/sv/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word, having been trained on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| -------------------------- | ----------- | ------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
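If you want to confirm AVX2 support before downloading anything, you can query your CPU's feature flags directly. Below is a minimal sketch for Linux and macOS; the `avx2` flag name is reported by the operating system rather than by Kobold.cpp, and Apple Silicon Macs use a different instruction set entirely, so this check only applies to x86 machines. Windows users can check with a hardware-information tool such as CPU-Z.

```bash
# Linux: prints "avx2" once if the CPU advertises AVX2 support, nothing otherwise
grep -o -m1 'avx2' /proc/cpuinfo

# macOS (Intel): AVX2 appears in the CPU's leaf 7 feature list
sysctl -a | grep -i 'avx2'
```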
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its inference optimizations. It also doesn't involve any manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by downloading the AI model you want to use with a single command. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity. + +### Llamafile + +
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla has made llamafiles available for only some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain its own library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux (or `shasum -a 256` on macOS), or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
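To make the checksum comparison described above concrete, here is a minimal sketch using the commands from the footnote. The file name `model.gguf` is a placeholder for whatever you actually downloaded, and the reference hash is the one copied from Hugging Face's **Copy SHA256** button:

```bash
# Linux: print the SHA-256 checksum of the downloaded model file
sha256sum model.gguf

# macOS: shasum ships with the operating system
shasum -a 256 model.gguf

# Windows (Command Prompt)
certutil -hashfile model.gguf SHA256
```

If the printed hash differs from the published one, the file may have been corrupted or tampered with in transit and should not be used.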
diff --git a/i18n/sv/data-broker-removals.md b/i18n/sv/data-broker-removals.md index 834e43a6..6b1f7e56 100644 --- a/i18n/sv/data-broker-removals.md +++ b/i18n/sv/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/sv/tools.md b/i18n/sv/tools.md index 3632920a..f8ac6a4a 100644 --- a/i18n/sv/tools.md +++ b/i18n/sv/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Programvara +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/tr/ai-chat.md b/i18n/tr/ai-chat.md new file mode 100644 index 00000000..af64bd7d --- /dev/null +++ b/i18n/tr/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word, having been trained on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
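If you want to confirm AVX2 support before downloading anything, you can query your CPU's feature flags directly. Below is a minimal sketch for Linux and macOS; the `avx2` flag name is reported by the operating system rather than by Kobold.cpp, and Apple Silicon Macs use a different instruction set entirely, so this check only applies to x86 machines. Windows users can check with a hardware-information tool such as CPU-Z.

```bash
# Linux: prints "avx2" once if the CPU advertises AVX2 support, nothing otherwise
grep -o -m1 'avx2' /proc/cpuinfo

# macOS (Intel): AVX2 appears in the CPU's leaf 7 feature list
sysctl -a | grep -i 'avx2'
```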
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its inference optimizations. It also doesn't involve any manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by downloading the AI model you want to use with a single command. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity. + +### Llamafile + +
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla has made llamafiles available for only some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain its own library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux (or `shasum -a 256` on macOS), or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
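To make the checksum comparison described above concrete, here is a minimal sketch using the commands from the footnote. The file name `model.gguf` is a placeholder for whatever you actually downloaded, and the reference hash is the one copied from Hugging Face's **Copy SHA256** button:

```bash
# Linux: print the SHA-256 checksum of the downloaded model file
sha256sum model.gguf

# macOS: shasum ships with the operating system
shasum -a 256 model.gguf

# Windows (Command Prompt)
certutil -hashfile model.gguf SHA256
```

If the printed hash differs from the published one, the file may have been corrupted or tampered with in transit and should not be used.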
diff --git a/i18n/tr/data-broker-removals.md b/i18n/tr/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/tr/data-broker-removals.md +++ b/i18n/tr/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/tr/tools.md b/i18n/tr/tools.md index 5ab05c1e..ac297ff0 100644 --- a/i18n/tr/tools.md +++ b/i18n/tr/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/uk/ai-chat.md b/i18n/uk/ai-chat.md new file mode 100644 index 00000000..af64bd7d --- /dev/null +++ b/i18n/uk/ai-chat.md @@ -0,0 +1,189 @@ +--- +meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides" +title: AI Chat +icon: material/assistant +description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device. +cover: ai-chatbots.webp +--- + +Protects against the following threat(s): + +- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal } +- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown } +- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray } + +Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They work by statistically predicting the next word, having been trained on a vast amount of data scraped from the web. + +## Privacy Concerns About LLMs + +Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from the provider. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users. + +If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data). + +Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry. + +## AI Models + +### Hardware for Local AI Models + +Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience. + +LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available to end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B. + +For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+ +| Model Size (in Parameters) | Minimum RAM | Minimum Processor | +| --------------------------------------------- | ----------- | -------------------------------------------- | +| 7B | 8GB | Modern CPU (AVX2 support) | +| 13B | 16GB | Modern CPU (AVX2 support) | +| 70B | 72GB | GPU with VRAM | + +To run AI locally, you need both an AI model and an AI client. + +### Choosing a Model + +There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware. + +To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely-used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards). + +## AI Chat Clients + +| Feature | [Kobold.cpp](#koboldcpp) | [Ollama](#ollama-cli) | [Llamafile](#llamafile) | +| -------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| GPU Support | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green } | +| Image Generation | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Speech Recognition | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-close:{ .pg-red } | +| Auto-download Models | :material-close:{ .pg-red } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available | +| Custom Parameters | :material-check:{ .pg-green } | :material-close:{ .pg-red } | :material-check:{ .pg-green } | +| Multi-platform | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows | + +### Kobold.cpp + +
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
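If you want to confirm AVX2 support before downloading anything, you can query your CPU's feature flags directly. Below is a minimal sketch for Linux and macOS; the `avx2` flag name is reported by the operating system rather than by Kobold.cpp, and Apple Silicon Macs use a different instruction set entirely, so this check only applies to x86 machines. Windows users can check with a hardware-information tool such as CPU-Z.

```bash
# Linux: prints "avx2" once if the CPU advertises AVX2 support, nothing otherwise
grep -o -m1 'avx2' /proc/cpuinfo

# macOS (Intel): AVX2 appears in the CPU's leaf 7 feature list
sysctl -a | grep -i 'avx2'
```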
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+ +![Ollama Logo](assets/img/ai-chat/ollama.png){align=right} + +Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its inference optimizations. It also doesn't involve any manual setup. + +In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision). + +[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+ +Ollama simplifies the process of setting up a local AI chat by downloading the AI model you want to use with a single command. For example, running `ollama run llama3.2` will automatically download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity. + +### Llamafile + +
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+ +Mozilla has made llamafiles available for only some Llama and Mistral models, and only a few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that. + +To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights). + +## Securely Downloading Models + +If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in their library, or if you use an AI client that doesn't maintain its own library (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate. + +We recommend downloading model files from Hugging Face since it provides several features to verify that your download is genuine and safe to use. + +To check the authenticity and safety of the model, look for: + +- Model cards with clear documentation +- A verified organization badge +- Community reviews and usage statistics +- A "Safe" badge next to the model file (Hugging Face only) +- Matching checksums[^1] + - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one from the model file you downloaded. + +A downloaded model is generally safe if it satisfies all of the above checks. + +## Criteria + +Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you. + +### Minimum Requirements + +- Must be open-source. +- Must not transmit personal data, including chat data. +- Must be multi-platform. +- Must not require a GPU. +- Must have support for GPU-powered fast inference. +- Must not require an internet connection. + +### Best-Case + +Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page. + +- Should be easy to download and set up, e.g. with a one-click install process. +- Should have a built-in model downloader option. +- The user should be able to modify the LLM parameters, such as its system prompt or temperature. + +[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux (or `shasum -a 256` on macOS), or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
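To make the checksum comparison described above concrete, here is a minimal sketch using the commands from the footnote. The file name `model.gguf` is a placeholder for whatever you actually downloaded, and the reference hash is the one copied from Hugging Face's **Copy SHA256** button:

```bash
# Linux: print the SHA-256 checksum of the downloaded model file
sha256sum model.gguf

# macOS: shasum ships with the operating system
shasum -a 256 model.gguf

# Windows (Command Prompt)
certutil -hashfile model.gguf SHA256
```

If the printed hash differs from the published one, the file may have been corrupted or tampered with in transit and should not be used.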
diff --git a/i18n/uk/data-broker-removals.md b/i18n/uk/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/uk/data-broker-removals.md +++ b/i18n/uk/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/uk/tools.md b/i18n/uk/tools.md index f3e960b7..b5448732 100644 --- a/i18n/uk/tools.md +++ b/i18n/uk/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/vi/ai-chat.md b/i18n/vi/ai-chat.md
new file mode 100644
index 00000000..ebd723de
--- /dev/null
+++ b/i18n/vi/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from those providers. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                       |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available         |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                       |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows  |
+
+### Kobold.cpp
+
+<div class="admonition recommendation" markdown>
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
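+
+To check ahead of time whether your CPU advertises these instruction sets, you can inspect its feature flags. A minimal sketch for Linux; other operating systems expose this information differently:
+
+```bash
+# Report whether the avx and avx2 flags appear in /proc/cpuinfo
+grep -qw avx /proc/cpuinfo && echo "AVX supported" || echo "AVX not detected"
+grep -qw avx2 /proc/cpuinfo && echo "AVX2 supported" || echo "AVX2 not detected"
+```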
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its efficient inference and other optimization techniques. It also doesn't require any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+<details class="downloads" markdown>
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
+### Llamafile
+
+<div class="admonition recommendation" markdown>
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla has made llamafiles available for only some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in that library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face, since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of a model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one you generate from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must support fast, GPU-powered inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
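+
+Putting the "Securely Downloading Models" guidance above into practice, here is a minimal sketch of fetching a single GGUF file with the Hugging Face CLI. It assumes Python's `pip` is available; the repository and file names below are placeholders, not a real model:
+
+```bash
+# Install the Hugging Face Hub client, which provides the huggingface-cli command
+pip install -U huggingface_hub
+
+# Download one GGUF file from a (placeholder) repository into the current directory,
+# then verify its checksum as described in the footnote above
+huggingface-cli download example-org/example-model example-model-Q4_K_M.gguf --local-dir .
+```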
diff --git a/i18n/vi/data-broker-removals.md b/i18n/vi/data-broker-removals.md index 1e235385..e154c15a 100644 --- a/i18n/vi/data-broker-removals.md +++ b/i18n/vi/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/vi/tools.md b/i18n/vi/tools.md index 96336daf..187ce5c5 100644 --- a/i18n/vi/tools.md +++ b/i18n/vi/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+ +[Learn more :material-arrow-right-drop-circle:](ai-chat.md) + ### Calendar Sync
diff --git a/i18n/zh-Hant/ai-chat.md b/i18n/zh-Hant/ai-chat.md
new file mode 100644
index 00000000..87f64448
--- /dev/null
+++ b/i18n/zh-Hant/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from those providers. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                       |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available         |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                       |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows  |
+
+### Kobold.cpp
+
+<div class="admonition recommendation" markdown>
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+<summary>Downloads</summary>
+
+- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases)
+- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases)
+- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases)
+
+</details>
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its efficient inference and other optimization techniques. It also doesn't require any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+<details class="downloads" markdown>
+<summary>Downloads</summary>
+
+- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows)
+- [:simple-apple: macOS](https://ollama.com/download/mac)
+- [:simple-linux: Linux](https://ollama.com/download/linux)
+
+</details>
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
+### Llamafile
+
+<div class="admonition recommendation" markdown>
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+<summary>Downloads</summary>
+
+- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart)
+
+</details>
+ +
+
+Mozilla has made llamafiles available for only some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in that library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face, since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of a model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one you generate from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must support fast, GPU-powered inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
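+
+Returning to the external-weights workaround described near the top of this section, here is a minimal sketch, assuming a downloaded `llamafile` binary and a separately obtained `model.gguf` (both file names are placeholders):
+
+```bash
+# Mark the llamafile binary as executable (Linux/macOS)
+chmod +x ./llamafile
+
+# Point it at an external GGUF model instead of weights embedded in the binary
+./llamafile -m ./model.gguf
+```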
diff --git a/i18n/zh-Hant/data-broker-removals.md b/i18n/zh-Hant/data-broker-removals.md index b87196ce..f1a5a9b7 100644 --- a/i18n/zh-Hant/data-broker-removals.md +++ b/i18n/zh-Hant/data-broker-removals.md @@ -86,8 +86,6 @@ EasyOptOuts 支援的某些網站可公開搜尋。 在這些情況下,EasyOpt EasyOptOuts 並不涵蓋下列我們認為「高度優先」的網站,因此您仍需手動選擇退出: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/zh-Hant/tools.md b/i18n/zh-Hant/tools.md index f658072d..d612b20e 100644 --- a/i18n/zh-Hant/tools.md +++ b/i18n/zh-Hant/tools.md @@ -358,6 +358,18 @@ description: Privacy Guides 社群所推薦的隱私工具、服務、軟體及 ## 軟體 +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+
+[Learn more :material-arrow-right-drop-circle:](ai-chat.md)
+
 ### 行事曆同步
diff --git a/i18n/zh/ai-chat.md b/i18n/zh/ai-chat.md
new file mode 100644
index 00000000..34c65595
--- /dev/null
+++ b/i18n/zh/ai-chat.md
@@ -0,0 +1,189 @@
+---
+meta_title: "Recommended AI Chat: Private ChatGPT Alternatives - Privacy Guides"
+title: AI Chat
+icon: material/assistant
+description: Unlike OpenAI's ChatGPT and its Big Tech competitors, these AI tools run locally so your data never leaves your desktop device.
+cover: ai-chatbots.webp
+---
+
+Protects against the following threat(s):
+
+- [:material-server-network: Service Providers](basics/common-threats.md#privacy-from-service-providers){ .pg-teal }
+- [:material-account-cash: Surveillance Capitalism](basics/common-threats.md#surveillance-as-a-business-model){ .pg-brown }
+- [:material-close-outline: Censorship](basics/common-threats.md#avoiding-censorship){ .pg-blue-gray }
+
+Since the release of ChatGPT in 2022, interactions with Large Language Models (LLMs) have become increasingly common. LLMs can help us write better, understand unfamiliar subjects, or answer a wide range of questions. They can statistically predict the next word based on a vast amount of data scraped from the web.
+
+## Privacy Concerns About LLMs
+
+Data used to train AI models, however, includes a massive amount of publicly available data scraped from the web, which can include sensitive information like names and addresses. Cloud-based AI software often [collects your inputs](https://openai.com/policies/row-privacy-policy), meaning your chats are not private from those providers. This practice also introduces a risk of data breaches. Furthermore, there is a real possibility that an LLM will leak your private chat information in future conversations with other users.
+
+If you are concerned about these practices, you can either refuse to use AI, or use [truly open-source models](https://proton.me/blog/how-to-build-privacy-first-ai) which publicly release their training datasets and allow you to inspect them. One such model is [OLMoE](https://allenai.org/blog/olmoe-an-open-small-and-state-of-the-art-mixture-of-experts-model-c258432d0514) made by [Ai2](https://allenai.org/open-data).
+
+Alternatively, you can run AI models locally so that your data never leaves your device and is therefore never shared with third parties. As such, local models are a more private and secure alternative to cloud-based solutions and allow you to share sensitive information with the AI model without worry.
+
+## AI Models
+
+### Hardware for Local AI Models
+
+Local models are also fairly accessible. It's possible to run smaller models at lower speeds on as little as 8GB of RAM. Using more powerful hardware, such as a dedicated GPU with sufficient VRAM or a modern system with fast LPDDR5X memory, offers the best experience.
+
+LLMs can usually be differentiated by the number of parameters, which can range from 1.3B to 405B for open-source models available for end users. For example, models below 6.7B parameters are only good for basic tasks like text summaries, while models between 7B and 13B are a great compromise between quality and speed. Models with advanced reasoning capabilities are generally around 70B.
+
+For consumer-grade hardware, it is generally recommended to use [quantized models](https://huggingface.co/docs/optimum/en/concept_guides/quantization) for the best balance between model quality and performance. Check out the table below for more precise information about the typical requirements for different sizes of quantized models.
+
+| Model Size (in Parameters) | Minimum RAM | Minimum Processor         |
+| -------------------------- | ----------- | ------------------------- |
+| 7B                         | 8GB         | Modern CPU (AVX2 support) |
+| 13B                        | 16GB        | Modern CPU (AVX2 support) |
+| 70B                        | 72GB        | GPU with VRAM             |
+
+To run AI locally, you need both an AI model and an AI client.
+
+### Choosing a Model
+
+There are many permissively licensed models available to download. [Hugging Face](https://huggingface.co/models) is a platform that lets you browse, research, and download models in common formats like [GGUF](https://huggingface.co/docs/hub/en/gguf). Companies that provide good open-weights models include big names like Mistral, Meta, Microsoft, and Google. However, there are also many community models and 'fine-tunes' available. As mentioned above, quantized models offer the best balance between model quality and performance for those using consumer-grade hardware.
+
+To help you choose a model that fits your needs, you can look at leaderboards and benchmarks. The most widely used leaderboard is the community-driven [LM Arena](https://lmarena.ai). Additionally, the [OpenLLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) focuses on the performance of open-weights models on common benchmarks like [MMLU-Pro](https://arxiv.org/abs/2406.01574). There are also specialized benchmarks which measure factors like [emotional intelligence](https://eqbench.com), ["uncensored general intelligence"](https://huggingface.co/spaces/DontPlanToEnd/UGI-Leaderboard), and [many others](https://www.nebuly.com/blog/llm-leaderboards).
+
+## AI Chat Clients
+
+| Feature              | [Kobold.cpp](#koboldcpp)      | [Ollama](#ollama-cli)         | [Llamafile](#llamafile)                                             |
+| -------------------- | ----------------------------- | ----------------------------- | ------------------------------------------------------------------- |
+| GPU Support          | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-check:{ .pg-green }                                       |
+| Image Generation     | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Speech Recognition   | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-close:{ .pg-red }                                         |
+| Auto-download Models | :material-close:{ .pg-red }   | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Few models available         |
+| Custom Parameters    | :material-check:{ .pg-green } | :material-close:{ .pg-red }   | :material-check:{ .pg-green }                                       |
+| Multi-platform       | :material-check:{ .pg-green } | :material-check:{ .pg-green } | :material-alert-outline:{ .pg-orange } Size limitations on Windows  |
+
+### Kobold.cpp
+
+<div class="admonition recommendation" markdown>
+ +![Kobold.cpp Logo](assets/img/ai-chat/kobold.png){align=right} + +Kobold.cpp is an AI client that runs locally on your Windows, Mac, or Linux computer. It's an excellent choice if you are looking for heavy customization and tweaking, such as for role-playing purposes. + +In addition to supporting a large range of text models, Kobold.cpp also supports image generators such as [Stable Diffusion](https://stability.ai/stable-image) and automatic speech recognition tools such as [Whisper](https://github.com/ggerganov/whisper.cpp). + +[:octicons-home-16: Homepage](https://github.com/LostRuins/koboldcpp){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/LostRuins/koboldcpp/wiki){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/LostRuins/koboldcpp){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/LostRuins/koboldcpp/blob/2f3597c29abea8b6da28f21e714b6b24a5aca79b/SECURITY.md){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-brands-windows: Windows](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-apple: macOS](https://github.com/LostRuins/koboldcpp/releases) +- [:simple-linux: Linux](https://github.com/LostRuins/koboldcpp/releases) + +
+ +
+ +
+

Compatibility Issues

+ +Kobold.cpp might not run on computers without AVX/AVX2 support. + +
+ +Kobold.cpp allows you to modify parameters such as the AI model temperature and the AI chat's system prompt. It also supports creating a network tunnel to access AI models from other devices such as your phone. + +### Ollama (CLI) + +
+
+![Ollama Logo](assets/img/ai-chat/ollama.png){align=right}
+
+Ollama is a command-line AI assistant that is available on macOS, Linux, and Windows. Ollama is a great choice if you're looking for an AI client that's easy to use, widely compatible, and fast due to its efficient inference and other optimization techniques. It also doesn't require any manual setup.
+
+In addition to supporting a wide range of text models, Ollama also supports [LLaVA](https://github.com/haotian-liu/LLaVA) models and has experimental support for Meta's [Llama vision capabilities](https://huggingface.co/blog/llama32#what-is-llama-32-vision).
+
+[:octicons-home-16: Homepage](https://ollama.com){ .md-button .md-button--primary }
+[:octicons-info-16:](https://github.com/ollama/ollama#readme){ .card-link title="Documentation" }
+[:octicons-code-16:](https://github.com/ollama/ollama){ .card-link title="Source Code" }
+[:octicons-lock-16:](https://github.com/ollama/ollama/blob/a14f76491d694b2f5a0dec6473514b7f93beeea0/SECURITY.md){ .card-link title="Security Policy" }
+
+<details class="downloads" markdown>
+Downloads + +- [:fontawesome-brands-windows: Windows](https://ollama.com/download/windows) +- [:simple-apple: macOS](https://ollama.com/download/mac) +- [:simple-linux: Linux](https://ollama.com/download/linux) + +
+ +
+
+Ollama simplifies the process of setting up a local AI chat by automatically downloading the AI model you want to use. For example, running `ollama run llama3.2` will download and run the Llama 3.2 model. Furthermore, Ollama maintains its own [model library](https://ollama.com/library) where it hosts the files of various AI models. This ensures that models are vetted for both performance and security, eliminating the need to manually verify model authenticity.
+
+### Llamafile
+
+<div class="admonition recommendation" markdown>
+ +![Llamafile Logo](assets/img/ai-chat/llamafile.svg){align=right} + +Llamafile is a lightweight single-file executable that allows users to run LLMs locally on their own computers without any setup involved. It is [backed by Mozilla](https://hacks.mozilla.org/2023/11/introducing-llamafile) and available on Linux, macOS, and Windows. + +Llamafile also supports LLaVA. However, it doesn't support speech recognition or image generation. + +[:octicons-home-16: Homepage](https://github.com/Mozilla-Ocho/llamafile){ .md-button .md-button--primary } +[:octicons-info-16:](https://github.com/Mozilla-Ocho/llamafile#llamafile){ .card-link title="Documentation" } +[:octicons-code-16:](https://github.com/Mozilla-Ocho/llamafile){ .card-link title="Source Code" } +[:octicons-lock-16:](https://github.com/Mozilla-Ocho/llamafile#security){ .card-link title="Security Policy" } + +
+Downloads + +- [:fontawesome-solid-desktop: Desktop](https://github.com/Mozilla-Ocho/llamafile#quickstart) + +
+ +
+
+Mozilla has made llamafiles available for only some Llama and Mistral models, and few third-party llamafiles are available. Moreover, Windows limits `.exe` files to 4GB, and most models are larger than that.
+
+To circumvent these issues, you can [load external weights](https://github.com/Mozilla-Ocho/llamafile#using-llamafile-with-external-weights).
+
+## Securely Downloading Models
+
+If you use an AI client that maintains its own library of model files (such as [Ollama](#ollama-cli) and [Llamafile](#llamafile)), you should download models from there. However, if you want to download models not present in that library, or if you use an AI client that doesn't maintain one (such as [Kobold.cpp](#koboldcpp)), you will need to take extra steps to ensure that the AI model you download is safe and legitimate.
+
+We recommend downloading model files from Hugging Face, since it provides several features to verify that your download is genuine and safe to use.
+
+To check the authenticity and safety of a model, look for:
+
+- Model cards with clear documentation
+- A verified organization badge
+- Community reviews and usage statistics
+- A "Safe" badge next to the model file (Hugging Face only)
+- Matching checksums[^1]
+    - On Hugging Face, you can find the hash by clicking on a model file and looking for the **Copy SHA256** button below it. You should compare this checksum with the one you generate from the model file you downloaded.
+
+A downloaded model is generally safe if it satisfies all of the above checks.
+
+## Criteria
+
+Please note we are not affiliated with any of the projects we recommend. In addition to [our standard criteria](about/criteria.md), we have developed a clear set of requirements to allow us to provide objective recommendations. We suggest you familiarize yourself with this list before choosing to use a project and conduct your own research to ensure it's the right choice for you.
+
+### Minimum Requirements
+
+- Must be open-source.
+- Must not transmit personal data, including chat data.
+- Must be multi-platform.
+- Must not require a GPU.
+- Must support fast, GPU-powered inference.
+- Must not require an internet connection.
+
+### Best-Case
+
+Our best-case criteria represent what we _would_ like to see from the perfect project in this category. Our recommendations may not include any or all of this functionality, but those which do may rank higher than others on this page.
+
+- Should be easy to download and set up, e.g. with a one-click install process.
+- Should have a built-in model downloader option.
+- The user should be able to modify the LLM parameters, such as its system prompt or temperature.
+
+[^1]: A file checksum is a type of anti-tampering fingerprint. A developer usually provides a checksum in a text file that can be downloaded separately, or on the download page itself. Verifying that the checksum of the file you downloaded matches the one provided by the developer helps ensure that the file is genuine and wasn't tampered with in transit. You can use commands like `sha256sum` on Linux and macOS, or `certutil -hashfile file SHA256` on Windows to generate the downloaded file's checksum.
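+
+As a final illustration of how these clients keep chats on your device: a locally running client answers requests on your own machine rather than on a remote server. A minimal sketch, assuming Ollama's default local API on port 11434 and a model you have already pulled:
+
+```bash
+# Send one prompt to a locally running Ollama instance; the request never leaves localhost
+curl http://localhost:11434/api/generate -d '{
+  "model": "llama3.2",
+  "prompt": "Briefly explain why running an LLM locally is more private.",
+  "stream": false
+}'
+```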
diff --git a/i18n/zh/data-broker-removals.md b/i18n/zh/data-broker-removals.md index 8ea1a885..6a784c4f 100644 --- a/i18n/zh/data-broker-removals.md +++ b/i18n/zh/data-broker-removals.md @@ -86,8 +86,6 @@ Our testing indicates that EasyOptOuts provides the best value out of any data r EasyOptOuts does not cover the following sites we consider to be "high priority," so you should still manually opt-out of: - Intelius ([Search](https://intelius.com), [Opt-Out](https://suppression.peopleconnect.us/login)) -- PeekYou ([Search](https://peekyou.com), [Opt-Out](https://peekyou.com/about/contact/optout)) -- PublicDataUSA ([Search](https://publicdatausa.com), [Opt-Out](https://publicdatausa.com/remove.php))
diff --git a/i18n/zh/tools.md b/i18n/zh/tools.md index 3085152e..f6a5b769 100644 --- a/i18n/zh/tools.md +++ b/i18n/zh/tools.md @@ -358,6 +358,18 @@ We [recommend](dns.md#recommended-providers) a number of encrypted DNS servers b ## Software +### AI Chat + +
+ +- ![Kobold logo](assets/img/ai-chat/kobold.png){ .twemoji loading=lazy } [Kobold.cpp](ai-chat.md#koboldcpp) +- ![Llamafile logo](assets/img/ai-chat/llamafile.svg){ .twemoji loading=lazy } [Llamafile](ai-chat.md#llamafile) +- ![Ollama logo](assets/img/ai-chat/ollama.png){ .twemoji loading=lazy } [Ollama (CLI)](ai-chat.md#ollama-cli) + +
+
+[Learn more :hero-arrow-circle-right-fill:](ai-chat.md)
+
 ### VPN供应商
diff --git a/includes/abbreviations.ar.txt b/includes/abbreviations.ar.txt index f0738f78..0d039f57 100644 --- a/includes/abbreviations.ar.txt +++ b/includes/abbreviations.ar.txt @@ -49,6 +49,8 @@ *[ISPs]: Internet Service Providers *[JNI]: Java Native Interface *[KYC]: Know Your Customer +*[LLaVA]: Large Language and Vision Assistant (multimodal AI model) +*[LLMs]: Large Language Models (AI models such as ChatGPT) *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption) *[MAC]: Media Access Control *[MDAG]: Microsoft Defender Application Guard @@ -62,6 +64,7 @@ *[OCSP]: Online Certificate Status Protocol *[OEM]: Original Equipment Manufacturer *[OEMs]: Original Equipment Manufacturers +*[open-weights]: An open weights-model is an AI model that anyone can download and use, but for which the underlying training data and/or algorithms are proprietary. *[OS]: Operating System *[OTP]: One-Time Password *[OTPs]: One-Time Passwords @@ -73,6 +76,7 @@ *[PII]: Personally Identifiable Information *[QNAME]: Qualified Name *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP. +*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access their services within a specified period of time. *[rolling release]: Updates which are released frequently rather than set intervals *[RSS]: Really Simple Syndication *[SELinux]: Security-Enhanced Linux @@ -86,6 +90,8 @@ *[SaaS]: Software as a Service (cloud software) *[SoC]: System on Chip *[SSO]: Single sign-on +*[system prompt]: The system prompt of an AI chat is the general instructions given by a human to guide how it should operate. +*[temperature]: AI temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text. *[TCP]: Transmission Control Protocol *[TEE]: Trusted Execution Environment *[TLS]: Transport Layer Security diff --git a/includes/abbreviations.bn-IN.txt b/includes/abbreviations.bn-IN.txt index 03c478c7..4d2f9ada 100644 --- a/includes/abbreviations.bn-IN.txt +++ b/includes/abbreviations.bn-IN.txt @@ -49,6 +49,8 @@ *[ISPs]: Internet Service Providers *[JNI]: Java Native Interface *[KYC]: Know Your Customer +*[LLaVA]: Large Language and Vision Assistant (multimodal AI model) +*[LLMs]: Large Language Models (AI models such as ChatGPT) *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption) *[MAC]: Media Access Control *[MDAG]: Microsoft Defender Application Guard @@ -62,6 +64,7 @@ *[OCSP]: Online Certificate Status Protocol *[OEM]: Original Equipment Manufacturer *[OEMs]: Original Equipment Manufacturers +*[open-weights]: An open weights-model is an AI model that anyone can download and use, but for which the underlying training data and/or algorithms are proprietary. *[OS]: Operating System *[OTP]: One-Time Password *[OTPs]: One-Time Passwords @@ -73,6 +76,7 @@ *[PII]: Personally Identifiable Information *[QNAME]: Qualified Name *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP. +*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access their services within a specified period of time. 
*[rolling release]: Updates which are released frequently rather than set intervals *[RSS]: Really Simple Syndication *[SELinux]: Security-Enhanced Linux @@ -86,6 +90,8 @@ *[SaaS]: Software as a Service (cloud software) *[SoC]: System on Chip *[SSO]: Single sign-on +*[system prompt]: The system prompt of an AI chat is the general instructions given by a human to guide how it should operate. +*[temperature]: AI temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text. *[TCP]: Transmission Control Protocol *[TEE]: Trusted Execution Environment *[TLS]: Transport Layer Security diff --git a/includes/abbreviations.bn.txt b/includes/abbreviations.bn.txt index 2b60fa15..f9b1abe4 100644 --- a/includes/abbreviations.bn.txt +++ b/includes/abbreviations.bn.txt @@ -49,6 +49,8 @@ *[ISPs]: Internet Service Providers *[JNI]: Java Native Interface *[KYC]: Know Your Customer +*[LLaVA]: Large Language and Vision Assistant (multimodal AI model) +*[LLMs]: Large Language Models (AI models such as ChatGPT) *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption) *[MAC]: Media Access Control *[MDAG]: Microsoft Defender Application Guard @@ -62,6 +64,7 @@ *[OCSP]: Online Certificate Status Protocol *[OEM]: Original Equipment Manufacturer *[OEMs]: Original Equipment Manufacturers +*[open-weights]: An open weights-model is an AI model that anyone can download and use, but for which the underlying training data and/or algorithms are proprietary. *[OS]: Operating System *[OTP]: One-Time Password *[OTPs]: One-Time Passwords @@ -73,6 +76,7 @@ *[PII]: Personally Identifiable Information *[QNAME]: Qualified Name *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP. +*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access their services within a specified period of time. *[rolling release]: Updates which are released frequently rather than set intervals *[RSS]: Really Simple Syndication *[SELinux]: Security-Enhanced Linux @@ -86,6 +90,8 @@ *[SaaS]: Software as a Service (cloud software) *[SoC]: System on Chip *[SSO]: Single sign-on +*[system prompt]: The system prompt of an AI chat is the general instructions given by a human to guide how it should operate. +*[temperature]: AI temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text. *[TCP]: Transmission Control Protocol *[TEE]: Trusted Execution Environment *[TLS]: Transport Layer Security diff --git a/includes/abbreviations.cs.txt b/includes/abbreviations.cs.txt index e4445741..9b25e4b2 100644 --- a/includes/abbreviations.cs.txt +++ b/includes/abbreviations.cs.txt @@ -49,6 +49,8 @@ *[ISPs]: Internet Service Providers *[JNI]: Nativní rozhraní Java *[KYC]: Know Your Customer +*[LLaVA]: Large Language and Vision Assistant (multimodal AI model) +*[LLMs]: Large Language Models (AI models such as ChatGPT) *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption) *[MAC]: Media Access Control *[MDAG]: Microsoft Defender Application Guard @@ -62,6 +64,7 @@ *[OCSP]: Online Certificate Status Protocol *[OEM]: Original Equipment Manufacturer *[OEMs]: Original Equipment Manufacturers +*[open-weights]: An open weights-model is an AI model that anyone can download and use, but for which the underlying training data and/or algorithms are proprietary. 
*[OS]: Operating System *[OTP]: Jednorázové heslo *[OTPs]: One-Time Passwords @@ -73,6 +76,7 @@ *[PII]: Personally Identifiable Information *[QNAME]: Qualified Name *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP. +*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access their services within a specified period of time. *[rolling release]: Updates which are released frequently rather than set intervals *[RSS]: Really Simple Syndication *[SELinux]: Linux s vylepšenou bezpečností @@ -86,6 +90,8 @@ *[SaaS]: Software as a Service (cloud software) *[SoC]: Systém na čipu *[SSO]: Jednotné přihlášení +*[system prompt]: The system prompt of an AI chat is the general instructions given by a human to guide how it should operate. +*[temperature]: AI temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text. *[TCP]: Transmission Control Protocol *[TEE]: Trusted Execution Environment *[TLS]: Transport Layer Security diff --git a/includes/abbreviations.de.txt b/includes/abbreviations.de.txt index edf3b70d..f038921c 100644 --- a/includes/abbreviations.de.txt +++ b/includes/abbreviations.de.txt @@ -49,6 +49,8 @@ *[ISPs]: Internetdienstanbieter *[JNI]: Native Java-Schnittstelle *[KYC]: Know Your Customer +*[LLaVA]: Large Language and Vision Assistant (multimodal AI model) +*[LLMs]: Large Language Models (AI models such as ChatGPT) *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption) *[MAC]: Media Access Control *[MDAG]: Microsoft Defender Anwendungsschutz @@ -62,6 +64,7 @@ *[OCSP]: Online Certificate Status Protocol *[OEM]: Original Equipment Manufacturer *[OEMs]: Original Equipment Manufacturers +*[open-weights]: An open weights-model is an AI model that anyone can download and use, but for which the underlying training data and/or algorithms are proprietary. *[OS]: Betriebssystem *[OTP]: Einmalpasswort *[OTPs]: Einmalpasswörter @@ -73,6 +76,7 @@ *[PII]: Personenbezogene Daten *[QNAME]: Qualified Name *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP. +*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access their services within a specified period of time. *[rolling release]: Updates, die häufig und nicht nur in bestimmten Abständen veröffentlicht werden *[RSS]: Really Simple Syndication *[SELinux]: Security-Enhanced Linux @@ -86,6 +90,8 @@ *[SaaS]: Software as a Service (cloud software) *[SoC]: System on Chip *[SSO]: Single sign-on +*[system prompt]: The system prompt of an AI chat is the general instructions given by a human to guide how it should operate. +*[temperature]: AI temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text. 
*[TCP]: Transmission Control Protocol *[TEE]: Trusted Execution Environment *[TLS]: Transport Layer Security diff --git a/includes/abbreviations.el.txt b/includes/abbreviations.el.txt index e026f159..75f04859 100644 --- a/includes/abbreviations.el.txt +++ b/includes/abbreviations.el.txt @@ -49,6 +49,8 @@ *[ISPs]: Πάροχοι Υπηρεσιών Διαδικτύου *[JNI]: Java Native Interface (ελληνιστί: Εγγενής Διεπαφή Java) *[KYC]: Μέτρα Δέουσας Επιμέλειας (νομικός όρος) ή "Συστηθείτε" (μαρκετίστικος όρος) +*[LLaVA]: Large Language and Vision Assistant (multimodal AI model) +*[LLMs]: Large Language Models (AI models such as ChatGPT) *[LUKS]: Ενοποιημένο Στήσιμο Κλειδιού Linux (Κρυπτογράφηση Πλήρους Δίσκου) *[MAC]: Έλεγχος Προσπέλασης Μέσου (αγγλικά: Medium Access Control) *[MDAG]: Microsoft Defender Application Guard @@ -62,6 +64,7 @@ *[OCSP]: Online Certificate Status Protocol (ελληνιστί: Πρωτόκολλο Διαδικτυακού Πιστοποιητικού Κατάστασης) *[OEM]: Παραγωγός Πρωτότυπου Εξοπλισμού *[OEMs]: Παραγωγοί Πρωτότυπου Εξοπλισμού +*[open-weights]: An open weights-model is an AI model that anyone can download and use, but for which the underlying training data and/or algorithms are proprietary. *[ΛΣ]: Λειτουργικό Σύστημα *[OTP]: Κωδικός Μιας-Χρήσης *[OTPs]: Κωδικοί Μιας-Χρήσης @@ -73,6 +76,7 @@ *[PII]: Προσωπικές Ταυτοποιήσιμες Πληροφορίες (αγγλικά: Personally Identifiable Information) *[QNAME]: Qualified Name *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP. +*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access their services within a specified period of time. *[κυλιόμενη κυκλοφορία]: Άμεσες ενημερώσεις χωρίς προκαθορισμένο χρονοδιάγραμμα κυκλοφορίας (αγγλικά: rolling release) *[RSS]: Really Simple Syndication *[SELinux]: Security-Enhanced Linux @@ -86,6 +90,8 @@ *[SaaS]: Λογισμικό ως Υπηρεσία ("Λογισμικό Νέφους") *[SoC]: System on Chip *[SSO]: Ενιαία Σύνδεση (αγγλικά: Single Sign-On) +*[system prompt]: The system prompt of an AI chat is the general instructions given by a human to guide how it should operate. +*[temperature]: AI temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text. *[TCP]: Transmission Control Protocol (ελληνιστί: Πρωτόκολλο Ελέγχου Μεταφοράς) *[TEE]: Trusted Execution Environment (ελληνιστί: Εμπιστευμένο Περιβάλλον Εκτέλεσης) *[TLS]: Transport Layer Security (ελληνιστί: Ασφάλεια Επιπέδου Μεταφοράς) diff --git a/includes/abbreviations.eo.txt b/includes/abbreviations.eo.txt index 03c478c7..4d2f9ada 100644 --- a/includes/abbreviations.eo.txt +++ b/includes/abbreviations.eo.txt @@ -49,6 +49,8 @@ *[ISPs]: Internet Service Providers *[JNI]: Java Native Interface *[KYC]: Know Your Customer +*[LLaVA]: Large Language and Vision Assistant (multimodal AI model) +*[LLMs]: Large Language Models (AI models such as ChatGPT) *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption) *[MAC]: Media Access Control *[MDAG]: Microsoft Defender Application Guard @@ -62,6 +64,7 @@ *[OCSP]: Online Certificate Status Protocol *[OEM]: Original Equipment Manufacturer *[OEMs]: Original Equipment Manufacturers +*[open-weights]: An open weights-model is an AI model that anyone can download and use, but for which the underlying training data and/or algorithms are proprietary. 
 *[OS]: Operating System
 *[OTP]: One-Time Password
 *[OTPs]: One-Time Passwords
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Updates which are released frequently rather than set intervals
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (cloud software)
 *[SoC]: System on Chip
 *[SSO]: Single sign-on
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security
diff --git a/includes/abbreviations.es.txt b/includes/abbreviations.es.txt
index 5b996bd5..1ac39430 100644
--- a/includes/abbreviations.es.txt
+++ b/includes/abbreviations.es.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Proveedores de Servicio de Internet
 *[JNI]: Interfaz nativa de Java
 *[KYC]: Conoce a Tu Cliente
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Configuración de clave unificada Linux (cifrado de disco completo)
 *[MAC]: Control de Acceso a los Medios
 *[MDAG]: Protección de aplicaciones de Microsoft Defender
@@ -62,6 +64,7 @@
 *[OCSP]: Protocolo del Estado del Certificado de Línea
 *[OEM]: Fabricante Original de Equipo
 *[OEMs]: Fabricantes Originales de Equipos
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Sistema Operativo
 *[OTP]: Contraseña de Un Solo Uso
 *[OTPs]: Contraseña de Un Solo Uso
@@ -73,6 +76,7 @@
 *[PII]: Información Personalmente Identificable
 *[QNAME]: Nombre Cualificado
 *[QUIC]: Un protocolo de red basado en UDP, pero que pretende combinar la velocidad de UDP con la fiabilidad de TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[liberación progresiva]: Actualizaciones que se publican frecuentemente en lugar de intervalos establecidos
 *[RSS]: Sindicación Realmente Sencilla
 *[SELinux]: Linux con Seguridad Mejorada
@@ -86,6 +90,8 @@
 *[SaaS]: Software como servicio (software en la nube)
 *[SoC]: Sistema en chip
 *[SSO]: Inicio de sesión único
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Protocolo de Control de Transmisión
 *[TEE]: Entorno de Ejecución de Confianza
 *[TLS]: Seguridad de la Capa de Transporte
diff --git a/includes/abbreviations.fa.txt b/includes/abbreviations.fa.txt
index 7990fc9d..f4b53dfd 100644
--- a/includes/abbreviations.fa.txt
+++ b/includes/abbreviations.fa.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Internet Service Providers
 *[JNI]: Java Native Interface
 *[KYC]: Know Your Customer
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol
 *[OEM]: Original Equipment Manufacturer
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Operating System
 *[OTP]: One-Time Password
 *[OTPs]: One-Time Passwords
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Updates which are released frequently rather than set intervals
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (cloud software)
 *[SoC]: System on Chip
 *[SSO]: Single sign-on
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security
diff --git a/includes/abbreviations.fr.txt b/includes/abbreviations.fr.txt
index 5dc47687..791ff32f 100644
--- a/includes/abbreviations.fr.txt
+++ b/includes/abbreviations.fr.txt
@@ -49,6 +49,8 @@
 *[FAIs]: Fournisseurs d'accès internet
 *[JNI]: Interface native Java
 *[KYC]: Connaissance du client
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Configuration de la clé unifiée Linux (chiffrement complet du disque)
 *[MAC]: Contrôle d'accès aux médias
 *[MDAG]: Protection des applications Microsoft Defender
@@ -62,6 +64,7 @@
 *[OCSP]: Protocole d'état des certificats en ligne
 *[OEM]: Fabricant d'équipement d'origine
 *[OEMs]: Fabricants d'équipement d'origine
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Système d'exploitation
 *[OTP]: Mot de passe à usage unique
 *[OTPs]: Mots de passe à usage unique
@@ -73,6 +76,7 @@
 *[DCP]: Donnée à charactère personnel
 *[QNAME]: Nom qualifié
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[publication continue]: Mises à jour qui sont publiées fréquemment plutôt qu'à intervalles réguliers
 *[RSS]: Syndication vraiment simple
 *[SELinux]: Sécurité renforcée de Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Logiciel en tant que service (logiciel cloud)
 *[SoC]: Système sur puce
 *[SSO]: Authentification unique
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Protocole de contrôle de transmission
 *[TEE]: Environnement d'exécution de confiance
 *[TLS]: Sécurité de la couche transport
diff --git a/includes/abbreviations.he.txt b/includes/abbreviations.he.txt
index 3359a42f..b7f85ad2 100644
--- a/includes/abbreviations.he.txt
+++ b/includes/abbreviations.he.txt
@@ -49,6 +49,8 @@
 *[ISPs]: ספקי שירותי אינטרנט
 *[JNI]: ממשק מקורי של Java
 *[KYC]: הכר את הלקוח שלך
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol
 *[OEM]: Original Equipment Manufacturer
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: מערכת הפעלה
 *[OTP]: סיסמה חד - פעמית
 *[OTPs]: סיסמאות חד פעמיות
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Updates which are released frequently rather than set intervals
 *[RSS]: סינדיקציה ממש פשוטה
 *[SELinux]: לינוקס משופרת באבטחה
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (cloud software)
 *[SoC]: מערכת על שבב
 *[SSO]: כניסה יחידה
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: אבטחת שכבת תעבורה
diff --git a/includes/abbreviations.hi.txt b/includes/abbreviations.hi.txt
index 03c478c7..4d2f9ada 100644
--- a/includes/abbreviations.hi.txt
+++ b/includes/abbreviations.hi.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Internet Service Providers
 *[JNI]: Java Native Interface
 *[KYC]: Know Your Customer
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol
 *[OEM]: Original Equipment Manufacturer
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Operating System
 *[OTP]: One-Time Password
 *[OTPs]: One-Time Passwords
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Updates which are released frequently rather than set intervals
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (cloud software)
 *[SoC]: System on Chip
 *[SSO]: Single sign-on
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security
diff --git a/includes/abbreviations.hu.txt b/includes/abbreviations.hu.txt
index 8b00efc9..f3f51511 100644
--- a/includes/abbreviations.hu.txt
+++ b/includes/abbreviations.hu.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Internet Service Providers - Internet Szolgáltatók
 *[JNI]: Java Native Interface
 *[KYC]: Know Your Customer
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Teljes Lemez Titkosítás)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol - Online Tanúsítvány Státusz Protokoll
 *[OEM]: Original Equipment Manufacturer
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Operating System - Operációs Rendszer
 *[OTP]: One-Time Password - Egyszer Használható Jelszó
 *[OTPs]: One-Time Passwords - Egyszer Használható Jelszavak
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information - Személyazonosításra Alkalmas Információ
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Gyakran és nem meghatározott időközönként kiadott frissítések
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (felhőszoftver)
 *[SoC]: System on Chip
 *[SSO]: Single sign-on - Egyszeri Bejelentkezés
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security
diff --git a/includes/abbreviations.id.txt b/includes/abbreviations.id.txt
index a451dcce..14e56442 100644
--- a/includes/abbreviations.id.txt
+++ b/includes/abbreviations.id.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Penyedia Layanan Internet
 *[JNI]: Antarmuka Asli Java
 *[KYC]: Kenali Pelanggan Anda
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Pengaturan Kunci Terpadu Linux (Enkripsi Diska Penuh)
 *[MAC]: Kontrol Akses Media
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Protokol Status Sertifikat Daring
 *[OEM]: Produsen Peralatan Asli
 *[OEMs]: Produsen Peralatan Asli
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Sistem Operasi
 *[OTP]: Kata Sandi Sekali Pakai
 *[OTPs]: Kata Sandi Sekali Pakai
@@ -73,6 +76,7 @@
 *[PII]: Informasi Identifikasi Pribadi
 *[QNAME]: Nama yang Memenuhi Syarat
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rilis bergulir]: Pembaruan yang sering dirilis daripada interval yang ditetapkan
 *[RSS]: Really Simple Syndication
 *[SELinux]: Linux yang Ditingkatkan Keamanannya
@@ -86,6 +90,8 @@
 *[SaaS]: Perangkat lunak sebagai layanan (perangkat lunak awan)
 *[SoC]: Sistem pada Chip
 *[SSO]: Sistem masuk tunggal
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Protokol Kontrol Transmisi
 *[TEE]: Lingkungan Eksekusi Terpercaya
 *[TLS]: Keamanan Lapisan Transportasi
diff --git a/includes/abbreviations.it.txt b/includes/abbreviations.it.txt
index b26d01a2..8192a725 100644
--- a/includes/abbreviations.it.txt
+++ b/includes/abbreviations.it.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Fornitori di servizi Internet
 *[JNI]: Java Native Interface
 *[KYC]: Conosci Il Tuo Cliente
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Crittografia dell'intero disco)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol
 *[OEM]: Produttore di apparecchiature originali
 *[OEMs]: Produttori di apparecchiature originali
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Sistema Operativo
 *[OTP]: Password monouso
 *[OTPs]: Password monouso
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Aggiornamenti che vengono rilasciati frequentemente anziché a intervalli prestabiliti
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (software cloud)
 *[SoC]: System on Chip
 *[SSO]: Single sign-on
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security
diff --git a/includes/abbreviations.ja.txt b/includes/abbreviations.ja.txt
index 3328a367..36c03d40 100644
--- a/includes/abbreviations.ja.txt
+++ b/includes/abbreviations.ja.txt
@@ -49,6 +49,8 @@
 *[ISPs]: インターネットサービスプロバイダー(Internet Service Providers)
 *[JNI]: Javaネイティブインターフェース
 *[KYC]: Know Your Customer
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: メディア・アクセス・コントロール
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: オンライン証明書ステータスプロトコル(Online Certificate Status Protocol)
 *[OEM]: Original Equipment Manufacturer
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: オペレーティングシステム(Operating System)
 *[OTP]: ワンタイムパスワード
 *[OTPs]: ワンタイムパスワード
@@ -73,6 +76,7 @@
 *[PII]: 個人を特定できる情報(Personally Identifiable Information)
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[ローリング・リリース]: Updates which are released frequently rather than set intervals
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: サービスとしてのソフトウェア(クラウドソフトウェア、Software as a Service)
 *[SoC]: System on Chip
 *[SSO]: シングルサインオン(Single sign-on)
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: トランスミッション・コントロール・プロトコル(Transmission Control Protocol)
 *[TEE]: 信頼された実行環境(Trusted Execution Environment)
 *[TLS]: トランスポートレイヤーセキュリティー(Transport Layer Security)
diff --git a/includes/abbreviations.ko.txt b/includes/abbreviations.ko.txt
index 9ea2d20a..0ee3b456 100644
--- a/includes/abbreviations.ko.txt
+++ b/includes/abbreviations.ko.txt
@@ -49,6 +49,8 @@
 *[ISPs]: 인터넷 서비스 제공자 (Internet service providers)
 *[JNI]: Java Native Interface
 *[KYC]: 고객 확인 제도
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: 온라인 인증서 상태 프로토콜(Online Certificate Status Protocol)
 *[OEM]: 주문자 상표 부착 생산
 *[OEMs]: 주문자 상표 부착 생산
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: 운영 체제
 *[OTP]: 일회용 비밀번호
 *[OTPs]: 일회용 비밀번호
@@ -73,6 +76,7 @@
 *[PII]: 개인 식별 정보(Personally Identifiable Information)
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[롤링 릴리스]: 정해진 주기가 아닌, 빈번히 릴리스되는 업데이트
 *[RSS]: Really Simple Syndication
 *[SELinux]: 보안 강화 리눅스(Security-Enhanced Linux)
@@ -86,6 +90,8 @@
 *[SaaS]: 서비스형 소프트웨어 (클라우드 기반 소프트웨어)
 *[SoC]: System on Chip
 *[SSO]: Single Sign-On
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: 전송 제어 프로토콜
 *[TEE]: 신뢰 실행 환경(Trusted Execution Environment)
 *[TLS]: 전송 계층 보안
diff --git a/includes/abbreviations.ku-IQ.txt b/includes/abbreviations.ku-IQ.txt
index 03c478c7..4d2f9ada 100644
--- a/includes/abbreviations.ku-IQ.txt
+++ b/includes/abbreviations.ku-IQ.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Internet Service Providers
 *[JNI]: Java Native Interface
 *[KYC]: Know Your Customer
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol
 *[OEM]: Original Equipment Manufacturer
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Operating System
 *[OTP]: One-Time Password
 *[OTPs]: One-Time Passwords
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Updates which are released frequently rather than set intervals
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (cloud software)
 *[SoC]: System on Chip
 *[SSO]: Single sign-on
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security
diff --git a/includes/abbreviations.nl.txt b/includes/abbreviations.nl.txt
index 2238dfed..c7ea13b7 100644
--- a/includes/abbreviations.nl.txt
+++ b/includes/abbreviations.nl.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Internet Service Providers
 *[JNI]: Java Native Interface
 *[KYC]: Know Your Customer (ken uw klant)
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol
 *[OEM]: Original Equipment Manufacturer
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Operating System
 *[OTP]: One-Time Password
 *[OTPs]: One-Time Passwords
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Updates die regelmatig worden uitgebracht in plaats van met vaste tussenpozen
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (cloud software)
 *[SoC]: System on Chip
 *[SSO]: Eenmalige aanmelding
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security
diff --git a/includes/abbreviations.pl.txt b/includes/abbreviations.pl.txt
index 4c1db6c2..0dab3592 100644
--- a/includes/abbreviations.pl.txt
+++ b/includes/abbreviations.pl.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Dostawcy usług internetowych
 *[JNI]: Natywny interfejs Java
 *[KYC]: Poznaj swojego klienta
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (szyfrowanie całego dysku)
 *[MAC]: Kontrola dostępu
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Stan certyfikatu online
 *[OEM]: Producent oryginalnego sprzętu
 *[OEMs]: Producenci oryginalnego sprzętu
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: System operacyjny
 *[OTP]: Jednorazowe hasło
 *[OTPs]: Jednorazowe hasła
@@ -73,6 +76,7 @@
 *[PII]: Informacje umożliwiające identyfikację osoby
 *[QNAME]: Nazwa kwalifikowana
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Aktualizacje, które są wydawane często, a nie w ustalonych odstępach czasu
 *[RSS]: Really Simple Syndication
 *[SELinux]: Linux o zwiększonym bezpieczeństwie
@@ -86,6 +90,8 @@
 *[SaaS]: Oprogramowanie jako usługa (oprogramowanie w chmurze)
 *[SoC]: System na chipie
 *[SSO]: Logowanie jednokrotne
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Protokół sterowania transmisją
 *[TEE]: Środowisko zaufanego wykonania
 *[TLS]: Bezpieczeństwo warstwy transportowej
diff --git a/includes/abbreviations.pt-BR.txt b/includes/abbreviations.pt-BR.txt
index d6e694d5..e2a2f7b5 100644
--- a/includes/abbreviations.pt-BR.txt
+++ b/includes/abbreviations.pt-BR.txt
@@ -49,6 +49,8 @@ Caraterística digital herdada: Legado Digital refere-se a recursos que permitem
 *[ISPs]: Provedores de Internet
 *[JNI]: Java Native Interface
 *[KYC]: Conheça seu cliente
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Criptografia total de disco)
 *[MAC]: Controle de Acesso ao Meio
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@ Caraterística digital herdada: Legado Digital refere-se a recursos que permitem
 *[OCSP]: Protocolo de Status de Certificado Online
 *[OEM]: Fabricante do Equipamento Original
 *[OEMs]: Fabricantes de Equipamentos Originais
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Sistema Operacional
 *[OTP]: Senha de uso único
 *[OTPs]: Senhas de uso único
@@ -73,6 +76,7 @@ Caraterística digital herdada: Legado Digital refere-se a recursos que permitem
 *[PII]: Informações Pessoalmente Identificáveis
 *[QNAME]: Qualified Name
 *[QUIC]: Um protocolo de rede baseado no UDP, mas com o objetivo de combinar a velocidade do UDP com a confiabilidade do TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Atualizações lançadas com frequência em vez de intervalos
 *[RSS]: Sindicação bem simples
 *[SELinux]: Segurança aprimorada do Linux
@@ -86,6 +90,8 @@ Caraterística digital herdada: Legado Digital refere-se a recursos que permitem
 *[SaaS]: Software como um Serviço (software em nuvem)
 *[SoC]: Sistema em um Chip
 *[SSO]: Login único
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Protocolo de Controle de Transmissão
 *[TEE]: Ambiente de Execução Confiável
 *[TLS]: Segurança da Camada de Transporte
diff --git a/includes/abbreviations.pt.txt b/includes/abbreviations.pt.txt
index 343e955c..9cd2c537 100644
--- a/includes/abbreviations.pt.txt
+++ b/includes/abbreviations.pt.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Internet Service Providers – Fornecedores de Internet
 *[JNI]: Interface nativa Java
 *[KYC]: Conheça o seu cliente
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Configuração de chave unificada do Linux (encriptação de disco completo)
 *[MAC]: Controle de Acesso Mídia
 *[MDAG]: Proteção de aplicações do Microsoft Defender
@@ -62,6 +64,7 @@
 *[OCSP]: Protocolo de Status de Certificados Online
 *[OEM]: Fabricante de equipamento original
 *[OEMs]: Fabricantes de equipamento original
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Sistema operativo
 *[OTP]: Palavra-passe de utilização única
 *[OTPs]: Palavra-passe de utilização única
@@ -73,6 +76,7 @@
 *[PII]: Informações pessoais identificáveis
 *[QNAME]: Nome qualificado
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Atualizações que são disponibilizadas frequentemente em vez de a intervalos definidos
 *[RSS]: Really Simple Syndication
 *[SELinux]: Linux com segurança reforçada
@@ -86,6 +90,8 @@
 *[SaaS]: Software como serviço (software em nuvem)
 *[SoC]: Sistema em chip
 *[SSO]: Início de sessão único
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Protocolo de Controlo de Transmissão
 *[TEE]: Ambiente de execução fiável
 *[TLS]: Segurança da camada de transporte
diff --git a/includes/abbreviations.ru.txt b/includes/abbreviations.ru.txt
index d5d2822f..b6c6abc2 100644
--- a/includes/abbreviations.ru.txt
+++ b/includes/abbreviations.ru.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Интернет-провайдеры
 *[JNI]: Нативный интерфейс Java
 *[KYC]: Знай своего клиента (Know Your Customer)
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Шифрование всего диска Linux (Linux Unified Key Setup)
 *[MAC]: Управление доступом к среде
 *[MDAG]: Защита приложений при помощи Microsoft Defender
@@ -62,6 +64,7 @@
 *[OCSP]: Протокол состояния сетевого сертификата
 *[OEM]: Оригинальный производитель оборудования
 *[OEMs]: Оригинальные производители оборудования
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[ОС]: Операционная система
 *[OTP]: Одноразовый пароль (One-Time Password)
 *[OTPs]: Одноразовые пароли (One-Time Passwords)
@@ -73,6 +76,7 @@
 *[ПД]: Персональные данные
 *[QNAME]: Квалифицированное имя элемента, атрибута или идентификатора в документе XML
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Обновления, которые выходят часто, а не через определенные промежутки времени
 *[RSS]: Способ распространения лент новостей или изменений в блогах
 *[SELinux]: Linux с повышенной безопасностью
@@ -86,6 +90,8 @@
 *[SaaS]: Программное обеспечение как услуга (облачное программное обеспечение)
 *[SoC]: Система на кристалле (System on Chip)
 *[SSO]: Единый вход
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Протокол управления передачей данных
 *[TEE]: Доверенная среда исполнения
 *[TLS]: Протокол защиты транспортного уровня
diff --git a/includes/abbreviations.sv.txt b/includes/abbreviations.sv.txt
index 28aaf44e..0fc5be21 100644
--- a/includes/abbreviations.sv.txt
+++ b/includes/abbreviations.sv.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Internetleverantör
 *[JNI]: Java inbyggt gränssnitt
 *[KYC]: Know Your Customer
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Kryptering)
 *[MAC]: Medieåtkomstkontroll
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Certifikatstatus online
 *[OEM]: Originalutrustningstillverkare
 *[OEMs]: Originalutrustningstillverkare
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Operativsystem
 *[OTP]: Engångslösenord
 *[OTPs]: Engångslösenord
@@ -73,6 +76,7 @@
 *[PII]: Personligt identifierbar information
 *[QNAME]: Kvalificerat namn
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rullande utgåva]: Uppdateringar som släpps ofta i stället för med fasta intervaller
 *[RSS]: Riktigt enkel syndikering
 *[SELinux]: Linux med förbättrad säkerhet
@@ -86,6 +90,8 @@
 *[SaaS]: Programvara som tjänst (molnprogramvara)
 *[SoC]: System på chip
 *[SSO]: Single sign-on
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Protokoll för överföringskontroll
 *[TEE]: Miljö för tillförlitlig utförande
 *[TLS]: Säkerhet för transportlager
diff --git a/includes/abbreviations.tr.txt b/includes/abbreviations.tr.txt
index 2134340c..a8e5fec7 100644
--- a/includes/abbreviations.tr.txt
+++ b/includes/abbreviations.tr.txt
@@ -49,6 +49,8 @@
 *[İSS'ler]: İnternet Servis Sağlayıcıları
 *[JNI]: Java Yerel Arayüzü
 *[KYC]: Müşterini Tanı
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Birleşik Anahtar Kurulumu (Tam Disk Şifreleme)
 *[MAC]: Ortam Erişim Yönetimi
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Çevrimiçi Sertifika Durum Protokolü
 *[OEM]: Orijinal Ürün Üreticisi
 *[OEM'ler]: Orijinal Ürün Üreticiler
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[İS]: İşletim Sistemi
 *[OTP]: Tek Seferlik Parola
 *[OTP'ler]: Tek Seferlik Parolalar
@@ -73,6 +76,7 @@
 *[PII]: Kişiyi Tanımlamak İçin Kullanılan Bilgiler
 *[QNAME]: Nitelikli Ad
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[yuvarlanan sürüm]: Sabit aralıklar yerine sık sık yayınlanan güncellemeler
 *[RSS]: Gerçekten Basit Dağıtım
 *[SELinux]: Güvenliği Geliştirilmiş Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Hizmet Olarak Yazılım (bulut yazılım)
 *[SoC]: Yongada Sistem
 *[TOA]: Tek Oturum Açma
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: İletim Kontrol Protokolü
 *[TEE]: Güvenilir Yürütme Ortamı
 *[TLS]: Taşıma Katmanı Güvenliği
diff --git a/includes/abbreviations.uk.txt b/includes/abbreviations.uk.txt
index e401904e..3fe6bb15 100644
--- a/includes/abbreviations.uk.txt
+++ b/includes/abbreviations.uk.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Інтернет-провайдери (Internet Service Providers)
 *[JNI]: Java Native Interface
 *[KYC]: Know Your Customer
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol
 *[OEM]: Original Equipment Manufacturer
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Operating System
 *[OTP]: One-Time Password
 *[OTPs]: One-Time Passwords
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Updates which are released frequently rather than set intervals
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (cloud software)
 *[SoC]: System on Chip
 *[SSO]: Single sign-on
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security
diff --git a/includes/abbreviations.vi.txt b/includes/abbreviations.vi.txt
index 03c478c7..4d2f9ada 100644
--- a/includes/abbreviations.vi.txt
+++ b/includes/abbreviations.vi.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Internet Service Providers
 *[JNI]: Java Native Interface
 *[KYC]: Know Your Customer
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol
 *[OEM]: Original Equipment Manufacturer
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: Operating System
 *[OTP]: One-Time Password
 *[OTPs]: One-Time Passwords
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Updates which are released frequently rather than set intervals
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (cloud software)
 *[SoC]: System on Chip
 *[SSO]: Single sign-on
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security
diff --git a/includes/abbreviations.zh-Hant.txt b/includes/abbreviations.zh-Hant.txt
index 3d1aef85..e75ef153 100644
--- a/includes/abbreviations.zh-Hant.txt
+++ b/includes/abbreviations.zh-Hant.txt
@@ -49,6 +49,8 @@
 *[ISPs]: 網際網路服務提供商
 *[JNI]: Java 原生介面
 *[KYC]: 客戶身分審查
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux 統一金鑰設定 (全磁碟加密)
 *[MAC]: 媒體存取控制
 *[MDAG]: Microsoft Defender 應用程式防護
@@ -62,6 +64,7 @@
 *[OCSP]: 線上憑證狀態協定
 *[OEM]: 原始設備製造商
 *[OEMs]: 原始設備製造商
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: 操作系統
 *[OTP]: 一次性密碼
 *[OTPs]: 一次性密碼
@@ -73,6 +76,7 @@
 *[PII]: 個人識別資訊
 *[QNAME]: 限定名稱
 *[QUIC]: 以 UDP 為基礎的網路協定,旨在結合 UDP 的速度與 TCP 的可靠性。
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[滾動式更新]: 頻繁釋出的更新,而非在固定的時間間隔內進行更新。
 *[RSS]: 簡易資訊聚合格式
 *[SELinux]: 安全增強型 Linux
@@ -86,6 +90,8 @@
 *[SaaS]: 軟體即服務 (雲端軟體)
 *[SoC]: 系統晶片
 *[SSO]: 單一登入
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: 傳輸控制通訊協定
 *[TEE]: 受信任執行環境
 *[TLS]: 傳輸層安全性
diff --git a/includes/abbreviations.zh.txt b/includes/abbreviations.zh.txt
index cdcb1903..cdfaf83f 100644
--- a/includes/abbreviations.zh.txt
+++ b/includes/abbreviations.zh.txt
@@ -49,6 +49,8 @@
 *[ISPs]: Internet Service Providers
 *[JNI]: Java Native Interface
 *[KYC]: Know Your Customer
+*[LLaVA]: Large Language and Vision Assistant (multimodal AI model)
+*[LLMs]: Large Language Models (AI models such as ChatGPT)
 *[LUKS]: Linux Unified Key Setup (Full-Disk Encryption)
 *[MAC]: Media Access Control
 *[MDAG]: Microsoft Defender Application Guard
@@ -62,6 +64,7 @@
 *[OCSP]: Online Certificate Status Protocol
 *[OEM]: 原始设备制造商
 *[OEMs]: Original Equipment Manufacturers
+*[open-weights]: An open-weights model is an AI model that anyone can download and use, but whose underlying training data and/or algorithms are proprietary.
 *[OS]: 操作系统
 *[OTP]: 一次性口令
 *[OTPs]: 一次性口令
@@ -73,6 +76,7 @@
 *[PII]: Personally Identifiable Information
 *[QNAME]: Qualified Name
 *[QUIC]: A network protocol based on UDP, but aiming to combine the speed of UDP with the reliability of TCP.
+*[rate limits]: Rate limits are restrictions that a service imposes on the number of times a user can access its services within a specified period of time.
 *[rolling release]: Updates which are released frequently rather than set intervals
 *[RSS]: Really Simple Syndication
 *[SELinux]: Security-Enhanced Linux
@@ -86,6 +90,8 @@
 *[SaaS]: Software as a Service (cloud software)
 *[SoC]: System on Chip
 *[SSO]: 单点登录
+*[system prompt]: The system prompt of an AI chat is the set of general instructions given by a human to guide how it should operate.
+*[temperature]: Temperature is a parameter used in AI models to control the level of randomness and creativity in the generated text.
 *[TCP]: Transmission Control Protocol
 *[TEE]: Trusted Execution Environment
 *[TLS]: Transport Layer Security