feat: Replace Linkwarden with Karakeep, add Ollama LLM server, add config examples

- Replace Linkwarden with Karakeep for AI-powered bookmarking
  - Supports links, notes, images, PDFs
  - AI auto-tagging with Ollama integration
  - Browser extensions and mobile apps
  - Full-text search with Meilisearch

- Add Ollama for local LLM inference
  - Run Llama, Mistral, CodeLlama locally
  - GPU acceleration support (GTX 1070)
  - OpenAI-compatible API
  - Integrates with Karakeep for AI features

- Add example configuration files for services
  - Sonarr: config.xml.example
  - Radarr: config.xml.example
  - SABnzbd: sabnzbd.ini.example
  - qBittorrent: qBittorrent.conf.example
  - Vikunja: config.yml.example
  - FreshRSS: config.php.example

- Fix incomplete FreshRSS compose.yaml

- Update README with new services and deployment instructions
Claude 2025-11-09 06:16:27 +00:00
parent 9807ce1627
commit 9fbd003798
18 changed files with 2182 additions and 126 deletions


@@ -43,7 +43,8 @@ compose/
 └── services/          # Utility services
     ├── homarr/        # Dashboard (home.fig.systems)
     ├── backrest/      # Backup manager (backup.fig.systems)
-    ├── linkwarden/    # Bookmark manager (links.fig.systems)
+    ├── karakeep/      # Bookmark manager with AI (links.fig.systems)
+    ├── ollama/        # Local LLM server (ollama.fig.systems)
     ├── vikunja/       # Task management (tasks.fig.systems)
     ├── lubelogger/    # Vehicle tracker (garage.fig.systems)
     ├── calibre-web/   # Ebook library (books.fig.systems)
@@ -82,7 +83,8 @@ All services are accessible via:
 | SABnzbd | sabnzbd.fig.systems | ✅ |
 | qBittorrent | qbt.fig.systems | ✅ |
 | Profilarr | profilarr.fig.systems | ✅ |
-| Linkwarden | links.fig.systems | ✅ |
+| Karakeep | links.fig.systems | ✅ |
+| Ollama (API) | ollama.fig.systems | ✅ |
 | Vikunja | tasks.fig.systems | ✅ |
 | LubeLogger | garage.fig.systems | ✅ |
 | Calibre-web | books.fig.systems | ✅ |
@@ -164,7 +166,8 @@ cd compose/media/automation/recyclarr && docker compose up -d
 cd compose/media/automation/profilarr && docker compose up -d

 # Utility services
-cd compose/services/linkwarden && docker compose up -d
+cd compose/services/karakeep && docker compose up -d
+cd compose/services/ollama && docker compose up -d
 cd compose/services/vikunja && docker compose up -d
 cd compose/services/homarr && docker compose up -d
 cd compose/services/backrest && docker compose up -d


@@ -0,0 +1,200 @@
# qBittorrent Configuration Example
# This file will be auto-generated on first run
# Copy to ./config/qBittorrent/qBittorrent.conf and modify as needed
# Docs: https://github.com/qbittorrent/qBittorrent/wiki
[Application]
# File Logger
FileLogger\Enabled=true
FileLogger\Path=/config/qBittorrent/logs
FileLogger\Backup=true
FileLogger\DeleteOld=true
FileLogger\MaxSize=6MiB
FileLogger\Age=1
FileLogger\AgeType=1
# Memory
MemoryWorkingSetLimit=512
[BitTorrent]
# Session Settings
Session\DefaultSavePath=/downloads
Session\TempPath=/incomplete
Session\TempPathEnabled=true
# Port for incoming connections
Session\Port=6881
# Use UPnP/NAT-PMP
Session\UseUPnP=false
# Encryption mode
Session\Encryption=1
# 0 = Prefer encryption
# 1 = Require encryption
# 2 = Disable encryption
# Anonymous mode
Session\AnonymousMode=false
# Max connections
Session\MaxConnections=500
Session\MaxConnectionsPerTorrent=100
Session\MaxUploads=20
Session\MaxUploadsPerTorrent=4
# DHT
Session\DHTEnabled=true
Session\PeXEnabled=true
Session\LSDEnabled=true
# Queuing
Session\QueueingSystemEnabled=true
Session\MaxActiveDownloads=5
Session\MaxActiveTorrents=10
Session\MaxActiveUploads=5
# Seeding limits
Session\GlobalMaxSeedingMinutes=-1
Session\MaxRatioAction=0
# 0 = Pause torrent
# 1 = Remove torrent
Session\MaxRatio=2.0
# Torrent tracking
Session\AddTrackersEnabled=true
Session\AdditionalTrackers=
# Categories
Session\SubcategoriesEnabled=true
# Performance
Session\BTProtocol=Both
# TCP, UTP, Both
Session\uTPRateLimited=true
Session\DiskCacheSize=64
Session\DiskCacheTTL=60
# Speed limits (in KiB/s, 0 = unlimited)
Session\GlobalDLSpeedLimit=0
Session\GlobalUPSpeedLimit=0
# Alternative speed limits (scheduled)
Session\AltGlobalDLSpeedLimit=512
Session\AltGlobalUPSpeedLimit=256
Session\BandwidthSchedulerEnabled=false
# IP Filtering
Session\IPFilteringEnabled=false
Session\IPFilterFile=
# Proxy
Session\ProxyType=None
# Options: None, HTTP, SOCKS5, SOCKS4
Session\ProxyIP=
Session\ProxyPort=8080
Session\ProxyPeerConnections=false
Session\ProxyTorrentOnly=false
[LegalNotice]
Accepted=true
[Preferences]
# Downloads
Downloads\SavePath=/downloads
Downloads\TempPath=/incomplete
Downloads\TempPathEnabled=true
Downloads\ScanDirsV2=
Downloads\FinishedTorrentExportDir=
Downloads\PreAllocation=false
Downloads\UseIncompleteExtension=true
# Connection
Connection\PortRangeMin=6881
Connection\PortRangeMax=6881
Connection\UPnP=false
Connection\GlobalDLLimitAlt=512
Connection\GlobalUPLimitAlt=256
# Speed
Bittorrent\MaxConnecs=500
Bittorrent\MaxConnecsPerTorrent=100
Bittorrent\MaxUploads=20
Bittorrent\MaxUploadsPerTorrent=4
# Queue
Queueing\QueueingEnabled=true
Queueing\MaxActiveDownloads=5
Queueing\MaxActiveTorrents=10
Queueing\MaxActiveUploads=5
Queueing\IgnoreSlowTorrents=false
Queueing\SlowTorrentsDownloadRate=2
Queueing\SlowTorrentsUploadRate=2
# Scheduler
Scheduler\Enabled=false
Scheduler\days=EveryDay
Scheduler\start_time=@Variant(\0\0\0\xf\x4J\xa2\0)
Scheduler\end_time=@Variant(\0\0\0\xf\x1\x90\x1\0)
# RSS
RSS\AutoDownloader\DownloadRepacks=true
RSS\AutoDownloader\SmartEpisodeFilter=s(\\d+)e(\\d+), (\\d+)x(\\d+), "(\\d{4}[.\\-]\\d{1,2}[.\\-]\\d{1,2})", "(\\d{1,2}[.\\-]\\d{1,2}[.\\-]\\d{4})"
# Web UI
WebUI\Enabled=true
WebUI\LocalHostAuth=false
WebUI\Port=8080
WebUI\Address=*
WebUI\ServerDomains=*
WebUI\UseUPnP=false
# Web UI Authentication
WebUI\Username=admin
WebUI\Password_PBKDF2=GENERATED_ON_FIRST_RUN
# Security
WebUI\CSRFProtection=true
WebUI\SecureCookie=true
WebUI\ClickjackingProtection=true
WebUI\HostHeaderValidation=true
# Custom HTTP Headers
WebUI\CustomHTTPHeaders=
WebUI\CustomHTTPHeadersEnabled=false
# Reverse Proxy
WebUI\ReverseProxySupportEnabled=true
WebUI\TrustedReverseProxiesList=
# Alternative WebUI
WebUI\AlternativeUIEnabled=false
WebUI\RootFolder=
# Locale
General\Locale=en
WebUI\UseCustomHTTPHeaders=false
# Advanced
Advanced\RecheckOnCompletion=false
Advanced\AnonymousMode=false
Advanced\SuperSeeding=false
Advanced\IgnoreLimitsLAN=true
Advanced\IncludeOverhead=false
Advanced\AnnounceToAllTrackers=false
Advanced\AnnounceToAllTiers=true
# Tracker
Advanced\trackerPort=9000
# Embedded tracker
Advanced\trackerEnabled=false
# Logging
AdvancedSettings\LogFileEnabled=true
[RSS]
AutoDownloader\Enabled=false
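
All of the `*.example` files in this commit follow the same workflow: copy the example into the service's mounted config directory before first run, then restart the container. A minimal sketch for qBittorrent — the compose directory path is an assumption; the destination path comes from the header comment above:

```bash
# Hypothetical path; adjust to wherever the qbittorrent compose.yaml lives
cd ~/homelab/compose/media/downloaders/qbittorrent
cp qBittorrent.conf.example ./config/qBittorrent/qBittorrent.conf
docker compose restart
```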


@@ -0,0 +1,50 @@
<Config>
  <!-- Radarr Configuration Example -->
  <!-- This file will be auto-generated on first run -->
  <!-- Copy to ./config/config.xml and modify as needed -->
  <Port>7878</Port>
  <SslPort>9897</SslPort>
  <EnableSsl>False</EnableSsl>
  <LaunchBrowser>False</LaunchBrowser>
  <ApiKey>GENERATED_ON_FIRST_RUN</ApiKey>
  <AuthenticationMethod>None</AuthenticationMethod>
  <!-- Options: None, Basic, Forms, External -->
  <!-- Use External when behind Traefik with SSO -->
  <UrlBase></UrlBase>
  <!-- Set to /radarr if using a path-based proxy -->
  <UpdateMechanism>Docker</UpdateMechanism>
  <Branch>master</Branch>
  <!-- Options: master (stable), develop (beta), nightly -->
  <LogLevel>info</LogLevel>
  <!-- Options: trace, debug, info, warn, error, fatal -->
  <!-- Analytics (optional) -->
  <AnalyticsEnabled>False</AnalyticsEnabled>
  <!-- Backup -->
  <BackupFolder>/config/Backups</BackupFolder>
  <BackupInterval>7</BackupInterval>
  <BackupRetention>28</BackupRetention>
  <!-- Proxy Settings (if needed) -->
  <ProxyEnabled>False</ProxyEnabled>
  <ProxyType>Http</ProxyType>
  <ProxyHostname></ProxyHostname>
  <ProxyPort>8080</ProxyPort>
  <ProxyUsername></ProxyUsername>
  <ProxyPassword></ProxyPassword>
  <ProxyBypassFilter></ProxyBypassFilter>
  <ProxyBypassLocalAddresses>True</ProxyBypassLocalAddresses>
  <!-- Radarr-specific settings -->
  <MinimumAge>0</MinimumAge>
  <!-- Delay before grabbing release (in minutes) -->
  <Retention>0</Retention>
  <!-- Maximum age of usenet posts (0 = unlimited) -->
</Config>


@@ -0,0 +1,137 @@
# SABnzbd Configuration Example
# This file will be auto-generated on first run
# Copy to ./config/sabnzbd.ini and modify as needed
# Docs: https://sabnzbd.org/wiki/configuration/4.3/
[misc]
# Host and Port
host = 0.0.0.0
port = 8080
# URL Base (if using path-based proxy)
url_base =
# API Key (generated on first run)
api_key = GENERATED_ON_FIRST_RUN
nzb_key = GENERATED_ON_FIRST_RUN
# Authentication
# Use 'None' when behind Traefik with SSO
username =
password =
# Directories
download_dir = /incomplete
complete_dir = /complete
dirscan_dir =
script_dir =
# Performance
cache_limit = 500M
article_cache_max = 500M
# Adjust based on available RAM
# Download Settings
bandwidth_max =
bandwidth_perc = 100
# 0 = unlimited bandwidth
# Post-processing
enable_all_par = 0
# 0 = Download only needed par2 files
# 1 = Download all par2 files
par2_multicore = 1
# Use multiple CPU cores for par2 repair
nice =
ionice =
# Unpacking
enable_unzip = 1
enable_7zip = 1
enable_filejoin = 1
enable_tsjoin = 0
enable_par_cleanup = 1
safe_postproc = 1
# Quota
quota_size =
quota_day =
quota_resume = 0
quota_period = m
# Scheduling
schedlines =
# Format: hour minute day_of_week action
# SSL/TLS for Usenet servers
ssl_type = v23
ssl_ciphers =
# IPv6
enable_ipv6 = 1
ipv6_servers = 0
# Logging
log_level = 1
# 0 = No logging
# 1 = Errors/warnings (default)
# 2 = Info
max_log_size = 5242880
log_backups = 5
# Email notifications (optional)
email_endjob = 0
email_full = 0
email_server =
email_to =
email_from =
email_account =
email_pwd =
# RSS (optional)
rss_rate = 60
# External scripts (optional)
pre_script =
post_script =
# Misc
permissions =
folder_rename = 1
replace_spaces = 0
replace_dots = 0
auto_browser = 0
propagation_delay = 0
[servers]
# Usenet servers configured via web UI
# Or add manually here:
# [[server_name]]
# host = news.example.com
# port = 563
# ssl = 1
# username = your_username
# password = your_password
# connections = 20
# priority = 0
# retention = 3000
# enable = 1
[categories]
# Categories configured via web UI
# Default categories: Movies, TV, Audio, Software
[[*]]
name = *
order = 0
pp = 3
# 0 = Download
# 1 = +Repair
# 2 = +Unpack
# 3 = +Delete (recommended)
script = Default
dir =
newzbin =
priority = 0


@@ -0,0 +1,43 @@
<Config>
  <!-- Sonarr Configuration Example -->
  <!-- This file will be auto-generated on first run -->
  <!-- Copy to ./config/config.xml and modify as needed -->
  <Port>8989</Port>
  <SslPort>9898</SslPort>
  <EnableSsl>False</EnableSsl>
  <LaunchBrowser>False</LaunchBrowser>
  <ApiKey>GENERATED_ON_FIRST_RUN</ApiKey>
  <AuthenticationMethod>None</AuthenticationMethod>
  <!-- Options: None, Basic, Forms, External -->
  <!-- Use External when behind Traefik with SSO -->
  <UrlBase></UrlBase>
  <!-- Set to /sonarr if using a path-based proxy -->
  <UpdateMechanism>Docker</UpdateMechanism>
  <Branch>main</Branch>
  <!-- Options: main (stable), develop (beta) -->
  <LogLevel>info</LogLevel>
  <!-- Options: trace, debug, info, warn, error, fatal -->
  <!-- Analytics (optional) -->
  <AnalyticsEnabled>False</AnalyticsEnabled>
  <!-- Backup -->
  <BackupFolder>/config/Backups</BackupFolder>
  <BackupInterval>7</BackupInterval>
  <BackupRetention>28</BackupRetention>
  <!-- Proxy Settings (if needed) -->
  <ProxyEnabled>False</ProxyEnabled>
  <ProxyType>Http</ProxyType>
  <ProxyHostname></ProxyHostname>
  <ProxyPort>8080</ProxyPort>
  <ProxyUsername></ProxyUsername>
  <ProxyPassword></ProxyPassword>
  <ProxyBypassFilter></ProxyBypassFilter>
  <ProxyBypassLocalAddresses>True</ProxyBypassLocalAddresses>
</Config>


@@ -5,7 +5,36 @@ services:
   freshrss:
     container_name: freshrss
     image: lscr.io/linuxserver/freshrss:latest
+    restart: unless-stopped
     env_file:
       - .env
+    volumes:
+      - ./config:/config
+    networks:
+      - homelab
+    labels:
+      # Traefik
+      traefik.enable: true
+      traefik.docker.network: homelab
+      # Web UI
+      traefik.http.routers.freshrss.rule: Host(`rss.fig.systems`) || Host(`rss.edfig.dev`)
+      traefik.http.routers.freshrss.entrypoints: websecure
+      traefik.http.routers.freshrss.tls.certresolver: letsencrypt
+      traefik.http.services.freshrss.loadbalancer.server.port: 80
+      # SSO Protection
+      traefik.http.routers.freshrss.middlewares: tinyauth
+      # Homarr Discovery
+      homarr.name: FreshRSS
+      homarr.group: Services
+      homarr.icon: mdi:rss
+
+networks:
+  homelab:
+    external: true
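
A cheap way to confirm the repaired compose file parses before deploying is `docker compose config` (the path is assumed from the repo layout in the README):

```bash
cd ~/homelab/compose/services/freshrss
docker compose config --quiet && echo "compose.yaml OK"
```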


@@ -0,0 +1,130 @@
<?php
/**
 * FreshRSS Configuration Example
 * Copy to ./config/www/freshrss/data/config.php
 * Docs: https://freshrss.github.io/FreshRSS/en/admins/03_Troubleshooting.html
 */
return array(
    // Environment (production or development)
    'environment' => 'production',
    // Base URL
    'base_url' => 'https://rss.fig.systems',
    // Database type (sqlite, mysql, pgsql)
    'db' => array(
        'type' => 'sqlite',
        'host' => '',
        'user' => '',
        'password' => '',
        'base' => 'freshrss',
        // For MySQL/PostgreSQL:
        // 'type' => 'mysql',
        // 'host' => 'localhost:3306',
        // 'user' => 'freshrss',
        // 'password' => 'changeme',
        // 'base' => 'freshrss',
        'prefix' => 'freshrss_',
        'pdo_options' => array(),
    ),
    // Salt for password hashing (auto-generated)
    'salt' => 'GENERATED_ON_FIRST_RUN',
    // Authentication method
    // Options: form, http_auth, none
    'auth_type' => 'form',
    // Use form auth when behind Traefik with SSO
    // Allow self-registration
    'allow_anonymous' => false,
    'allow_anonymous_refresh' => false,
    // Default language
    'language' => 'en',
    // Theme
    'theme' => 'Origine',
    // Timezone
    'default_timezone' => 'America/Los_Angeles',
    // Auto-load more articles when scrolling
    'auto_load_more' => true,
    // Articles per page
    'posts_per_page' => 100,
    // Old articles (keep for X months)
    'old_entries' => 3,
    // Caching
    'cache' => array(
        'enabled' => true,
        'duration' => 3600, // seconds
    ),
    // Simplify HTML in articles
    'simplify_html' => false,
    // Disable update checking
    'disable_update_check' => true,
    // API access (Google Reader-compatible API)
    'api_enabled' => true,
    // Fever API compatibility
    'fever_api' => true,
    // Shortcuts
    'shortcuts' => array(
        'mark_read' => 'r',
        'mark_favorite' => 'f',
        'go_website' => 'v',
        'next_entry' => 'j',
        'prev_entry' => 'k',
        'first_entry' => 'shift+k',
        'last_entry' => 'shift+j',
        'collapse_entry' => 'c',
        'load_more' => 'm',
        'auto_share' => 's',
        'focus_search' => '/',
        'user_filter' => 'u',
        'help' => 'h',
        'close_dropdown' => 'esc',
        'prev_feed' => 'shift+up',
        'next_feed' => 'shift+down',
    ),
    // Extensions
    'extensions_enabled' => array(),
    // Proxy (if needed)
    'proxy' => array(
        'address' => '',
        'port' => '',
        'type' => '',
        'username' => '',
        'password' => '',
    ),
    // Limits
    'limits' => array(
        // Max feed checks per user per hour
        'max_feeds_refresh_per_user_per_hour' => 10,
        // Max articles per feed
        'max_articles_per_feed' => 10000,
        // Max registrations per IP per day
        'max_registrations_per_ip_per_day' => 5,
    ),
    // Logging
    'logging' => array(
        'level' => 'warning',
        // Options: emergency, alert, critical, error, warning, notice, info, debug
    ),
);


@@ -0,0 +1,56 @@
# Karakeep Configuration
# Docs: https://docs.karakeep.app
# NextAuth Configuration
NEXTAUTH_URL=https://links.fig.systems
# Generate with: openssl rand -base64 36
# Example format: aB2cD4eF6gH8iJ0kL2mN4oP6qR8sT0uV2wX4yZ6aB8cD0eF2gH4i
NEXTAUTH_SECRET=changeme_please_set_random_secret_key
# Meilisearch Master Key
# Generate with: openssl rand -base64 36
# Example format: gH4iJ6kL8mN0oP2qR4sT6uV8wX0yZ2aB4cD6eF8gH0iJ2kL4mN6o
MEILI_MASTER_KEY=changeme_please_set_meili_master_key
# Data Directory
DATADIR=/data
# Chrome Service URL (for web archiving)
BROWSER_WEB_URL=http://karakeep-chrome:9222
# Meilisearch URL
MEILI_ADDR=http://karakeep-meilisearch:7700
# Timezone
TZ=America/Los_Angeles
# Optional: Disable public signups
# DISABLE_SIGNUPS=true
# Optional: Maximum upload size in MB (default: 100)
# MAX_ASSET_SIZE_MB=100
# Optional: Enable OCR for images
# OCR_LANGS=eng,spa,fra,deu
# Optional: Ollama Integration (for AI features with local models)
# Uncomment these after deploying Ollama service
# OLLAMA_BASE_URL=http://ollama:11434
# INFERENCE_TEXT_MODEL=llama3.2:3b
# INFERENCE_IMAGE_MODEL=llava:7b
# INFERENCE_LANG=en
# Optional: OpenAI Integration (for AI features via cloud)
# OPENAI_API_KEY=sk-...
# OPENAI_BASE_URL=https://api.openai.com/v1
# INFERENCE_TEXT_MODEL=gpt-4o-mini
# INFERENCE_IMAGE_MODEL=gpt-4o-mini
# Optional: OpenRouter Integration (for AI features)
# OPENAI_API_KEY=sk-or-v1-...
# OPENAI_BASE_URL=https://openrouter.ai/api/v1
# INFERENCE_TEXT_MODEL=anthropic/claude-3.5-sonnet
# INFERENCE_IMAGE_MODEL=anthropic/claude-3.5-sonnet
# Optional: Logging
# LOG_LEVEL=info

compose/services/karakeep/.gitignore

@@ -0,0 +1,6 @@
# Karakeep data
data/
meili_data/
# Keep .env.example if created
!.env.example


@@ -0,0 +1,543 @@
# Karakeep - Bookmark Everything App
AI-powered bookmark manager for links, notes, images, and PDFs with automatic tagging and full-text search.
## Overview
**Karakeep** (previously known as Hoarder) is a self-hostable bookmark-everything app:
- ✅ **Bookmark Everything**: Links, notes, images, PDFs
- ✅ **AI-Powered**: Automatic tagging and summarization
- ✅ **Full-Text Search**: Find anything instantly with Meilisearch
- ✅ **Web Archiving**: Save complete webpages (full page archive)
- ✅ **Browser Extensions**: Chrome and Firefox support
- ✅ **Mobile Apps**: iOS and Android apps available
- ✅ **Ollama Support**: Use local AI models (no cloud required!)
- ✅ **OCR**: Extract text from images
- ✅ **Self-Hosted**: Full control of your data
## Quick Start
### 1. Configure Secrets
```bash
cd ~/homelab/compose/services/karakeep
# Edit .env and update:
# - NEXTAUTH_SECRET (generate with: openssl rand -base64 36)
# - MEILI_MASTER_KEY (generate with: openssl rand -base64 36)
nano .env
```
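
If you'd rather not paste secrets by hand, here is a minimal sketch that swaps in freshly generated values, assuming the placeholder lines shown in the `.env` above:

```bash
# openssl's base64 alphabet never contains '|', so it is safe as the sed delimiter
sed -i "s|^NEXTAUTH_SECRET=.*|NEXTAUTH_SECRET=$(openssl rand -base64 36)|" .env
sed -i "s|^MEILI_MASTER_KEY=.*|MEILI_MASTER_KEY=$(openssl rand -base64 36)|" .env
```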
### 2. Deploy
```bash
docker compose up -d
```
### 3. Access
Go to: **https://links.fig.systems**
**First-time setup:**
1. Create your admin account
2. Start bookmarking!
## Features
### Bookmark Types
**1. Web Links**
- Save any URL
- Automatic screenshot capture
- Full webpage archiving
- Extract title, description, favicon
- AI-generated summary and tags
**2. Notes**
- Quick text notes
- Markdown support
- AI-powered categorization
- Full-text searchable
**3. Images**
- Upload images directly
- OCR text extraction (if enabled)
- AI-based tagging
- Image search
**4. PDFs**
- Upload PDF documents
- Full-text indexing
- Searchable content
### AI Features
Karakeep can use AI to automatically:
- **Tag** your bookmarks
- **Summarize** web content
- **Extract** key information
- **Organize** by category
**Three AI options:**
**1. Ollama (Recommended - Local & Free)**
```env
# In .env, uncomment:
OLLAMA_BASE_URL=http://ollama:11434
INFERENCE_TEXT_MODEL=llama3.2:3b
INFERENCE_IMAGE_MODEL=llava:7b
```
**2. OpenAI**
```env
OPENAI_API_KEY=sk-...
OPENAI_BASE_URL=https://api.openai.com/v1
INFERENCE_TEXT_MODEL=gpt-4o-mini
```
**3. OpenRouter (multiple providers)**
```env
OPENAI_API_KEY=sk-or-v1-...
OPENAI_BASE_URL=https://openrouter.ai/api/v1
INFERENCE_TEXT_MODEL=anthropic/claude-3.5-sonnet
```
### Web Archiving
Karakeep saves complete web pages for offline viewing:
- **Full HTML archive**
- **Screenshots** of the page
- **Extracted text** for search
- **Works offline** - view archived pages anytime
### Search
Powered by Meilisearch:
- **Instant** full-text search
- **Fuzzy matching** - finds similar terms
- **Filter by** type, tags, dates
- **Search across** titles, content, tags, notes
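
If search ever seems off, you can probe the Meilisearch backend directly. This reuses the throwaway curl-container pattern from the Ollama docs and only assumes the container and network names from compose.yaml:

```bash
# Expect {"status":"available"} from the search backend
docker run --rm --network karakeep_internal curlimages/curl -s \
  http://karakeep-meilisearch:7700/health
```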
### Browser Extensions
**Install extensions:**
- [Chrome Web Store](https://chromewebstore.google.com/detail/karakeep/kbkejgonjhbmhcaofkhdegeoeoemgkdm)
- [Firefox Add-ons](https://addons.mozilla.org/en-US/firefox/addon/karakeep/)
**Configure extension:**
1. Install extension
2. Click extension icon
3. Enter server URL: `https://links.fig.systems`
4. Login with your credentials
5. Save bookmarks from any page!
### Mobile Apps
**Download apps:**
- [iOS App Store](https://apps.apple.com/app/karakeep/id6479258022)
- [Android Google Play](https://play.google.com/store/apps/details?id=app.karakeep.mobile)
**Setup:**
1. Install app
2. Open app
3. Enter server: `https://links.fig.systems`
4. Login
5. Bookmark on the go!
## Configuration
### Basic Settings
**Disable public signups:**
```env
DISABLE_SIGNUPS=true
```
**Set max file size (100MB default):**
```env
MAX_ASSET_SIZE_MB=100
```
**Enable OCR for multiple languages:**
```env
OCR_LANGS=eng,spa,fra,deu
```
### Ollama Integration
**Prerequisites:**
1. Deploy Ollama service (see `compose/services/ollama/`)
2. Pull models: `docker exec ollama ollama pull llama3.2:3b`
**Enable in Karakeep:**
```env
# In karakeep/.env
OLLAMA_BASE_URL=http://ollama:11434
INFERENCE_TEXT_MODEL=llama3.2:3b
INFERENCE_IMAGE_MODEL=llava:7b
INFERENCE_LANG=en
```
**Restart:**
```bash
docker compose restart
```
**Recommended models:**
- **Text**: llama3.2:3b (fast, good quality)
- **Images**: llava:7b (vision model)
- **Advanced**: llama3.3:70b (slower, better results)
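
Before restarting, make sure both configured models are actually present in Ollama:

```bash
docker exec ollama ollama pull llama3.2:3b
docker exec ollama ollama pull llava:7b
docker exec ollama ollama list   # both models should appear here
```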
### Advanced Settings
**Custom logging:**
```env
LOG_LEVEL=debug # Options: debug, info, warn, error
```
**Custom data directory:**
```env
DATADIR=/custom/path
```
**Chrome timeout (for slow sites):**
```env
# Add to compose.yaml environment section
BROWSER_TIMEOUT=60000 # 60 seconds
```
## Usage Workflows
### 1. Bookmark a Website
**Via Browser:**
1. Click Karakeep extension
2. Bookmark opens automatically
3. AI generates tags and summary
4. Edit tags/notes if needed
5. Save
**Via Mobile:**
1. Open share menu
2. Select Karakeep
3. Bookmark saved
**Manually:**
1. Open Karakeep
2. Click "+" button
3. Paste URL
4. Click Save
### 2. Quick Note
1. Open Karakeep
2. Click "+" → "Note"
3. Type your note
4. AI auto-tags
5. Save
### 3. Upload Image
1. Click "+" → "Image"
2. Upload image file
3. OCR extracts text (if enabled)
4. AI generates tags
5. Save
### 4. Search Everything
**Simple search:**
- Type in search box
- Results appear instantly
**Advanced search:**
- Filter by type (links, notes, images)
- Filter by tags
- Filter by date range
- Sort by relevance or date
### 5. Organize with Tags
**Auto-tags:**
- AI generates tags automatically
- Based on content analysis
- Can be edited/removed
**Manual tags:**
- Add your own tags
- Create tag hierarchies
- Color-code tags
**Tag management:**
- Rename tags globally
- Merge duplicate tags
- Delete unused tags
## Browser Extension Usage
### Quick Bookmark
1. **Visit any page**
2. **Click extension icon** (or keyboard shortcut)
3. **Automatically saved** with:
- URL
- Title
- Screenshot
- Full page archive
- AI tags and summary
### Save Selection
1. **Highlight text** on any page
2. **Right-click** → "Save to Karakeep"
3. **Saves as note** with source URL
### Save Image
1. **Right-click image**
2. Select "Save to Karakeep"
3. **Image uploaded** with AI tags
## Mobile App Features
- **Share from any app** to Karakeep
- **Quick capture** - bookmark in seconds
- **Offline access** to archived content
- **Search** your entire collection
- **Browse by tags**
- **Dark mode** support
## Data Management
### Backup
**Important data locations:**
```bash
compose/services/karakeep/
├── data/ # Uploaded files, archives
└── meili_data/ # Search index
```
**Backup script:**
```bash
#!/bin/bash
cd ~/homelab/compose/services/karakeep
tar czf karakeep-backup-$(date +%Y%m%d).tar.gz ./data ./meili_data
```
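
Restoring is the reverse; stop the stack first so the database and search index aren't written mid-copy:

```bash
docker compose down
tar xzf karakeep-backup-YYYYMMDD.tar.gz
docker compose up -d
```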
### Export
**Export bookmarks:**
1. Settings → Export
2. Choose format:
- JSON (complete data)
- HTML (browser-compatible)
- CSV (spreadsheet)
3. Download
### Import
**Import from other services:**
1. Settings → Import
2. Select source:
- Browser bookmarks (HTML)
- Pocket
- Raindrop.io
- Omnivore
- Instapaper
3. Upload file
4. Karakeep processes and imports
## Troubleshooting
### Karakeep won't start
**Check logs:**
```bash
docker logs karakeep
docker logs karakeep-chrome
docker logs karakeep-meilisearch
```
**Common issues:**
- Missing `NEXTAUTH_SECRET` in `.env`
- Missing `MEILI_MASTER_KEY` in `.env`
- Services not on `karakeep_internal` network
### Bookmarks not saving
**Check chrome service:**
```bash
docker logs karakeep-chrome
```
**Verify chrome is accessible:**
```bash
docker exec karakeep curl http://karakeep-chrome:9222
```
**Increase timeout:**
```env
# Add to .env
BROWSER_TIMEOUT=60000
```
### Search not working
**Rebuild search index:**
```bash
# Stop services
docker compose down
# Remove search data
rm -rf ./meili_data
# Restart (index rebuilds automatically)
docker compose up -d
```
**Check Meilisearch:**
```bash
docker logs karakeep-meilisearch
```
### AI features not working
**With Ollama:**
```bash
# Verify Ollama is running
docker ps | grep ollama
# Test Ollama connection
docker exec karakeep curl http://ollama:11434
# Check models are pulled
docker exec ollama ollama list
```
**With OpenAI/OpenRouter:**
- Verify API key is correct
- Check API balance/credits
- Review logs for error messages
### Extension can't connect
**Verify server URL:**
- Must be `https://links.fig.systems`
- Not `http://` or `localhost`
**Check CORS:**
```env
# Add to .env if needed
CORS_ALLOW_ORIGINS=https://links.fig.systems
```
**Clear extension data:**
1. Extension settings
2. Logout
3. Clear extension storage
4. Login again
### Mobile app issues
**Can't connect:**
- Use full HTTPS URL
- Ensure server is accessible externally
- Check firewall rules
**Slow performance:**
- Check network speed
- Reduce image quality in app settings
- Enable "Low data mode"
## Performance Optimization
### For Large Collections (10,000+ bookmarks)
**Increase Meilisearch RAM:**
```yaml
# In compose.yaml, add to karakeep-meilisearch:
deploy:
resources:
limits:
memory: 2G
reservations:
memory: 1G
```
**Optimize search index:**
```env
# In .env
MEILI_MAX_INDEXING_MEMORY=1048576000 # 1GB
```
### For Slow Archiving
**Increase Chrome resources:**
```yaml
# In compose.yaml, add to karakeep-chrome:
deploy:
resources:
limits:
memory: 1G
cpus: '1.0'
```
**Adjust timeouts:**
```env
BROWSER_TIMEOUT=90000 # 90 seconds
```
### Database Maintenance
**Vacuum (compact) database:**
```bash
# Karakeep uses SQLite by default
docker exec karakeep sqlite3 /data/karakeep.db "VACUUM;"
```
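
To see whether the vacuum reclaimed anything, compare the database file size on the host side of the `./data` bind mount (filename taken from the command above):

```bash
cd ~/homelab/compose/services/karakeep
ls -lh ./data/karakeep.db   # run before and after VACUUM
```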
## Comparison with Linkwarden
| Feature | Karakeep | Linkwarden |
|---------|----------|------------|
| **Bookmark Types** | Links, Notes, Images, PDFs | Links only |
| **AI Tagging** | Yes (Ollama/OpenAI) | No |
| **Web Archiving** | Full page + Screenshot | Screenshot only |
| **Search** | Meilisearch (fuzzy) | Meilisearch |
| **Browser Extension** | Yes | Yes |
| **Mobile Apps** | iOS + Android | No official apps |
| **OCR** | Yes | No |
| **Collaboration** | Personal focus | Team features |
| **Database** | SQLite | PostgreSQL |
**Why Karakeep?**
- More bookmark types
- AI-powered organization
- Better mobile support
- Lighter resource usage (SQLite vs PostgreSQL)
- Active development
## Resources
- [Official Website](https://karakeep.app)
- [Documentation](https://docs.karakeep.app)
- [GitHub Repository](https://github.com/karakeep-app/karakeep)
- [Demo Instance](https://try.karakeep.app)
- [Chrome Extension](https://chromewebstore.google.com/detail/karakeep/kbkejgonjhbmhcaofkhdegeoeoemgkdm)
- [Firefox Extension](https://addons.mozilla.org/en-US/firefox/addon/karakeep/)
## Next Steps
1. ✅ Deploy Karakeep
2. ✅ Create admin account
3. ✅ Install browser extension
4. ✅ Install mobile app
5. ⬜ Deploy Ollama for AI features
6. ⬜ Import existing bookmarks
7. ⬜ Configure AI models
8. ⬜ Set up automated backups
---
**Bookmark everything, find anything!** 🔖


@@ -0,0 +1,79 @@
# Karakeep - Bookmark Everything App with AI
# Docs: https://docs.karakeep.app
# Previously known as Hoarder
services:
  karakeep:
    container_name: karakeep
    image: ghcr.io/karakeep-app/karakeep:latest
    restart: unless-stopped
    env_file:
      - .env
    volumes:
      - ./data:/data
    depends_on:
      - karakeep-meilisearch
      - karakeep-chrome
    networks:
      - homelab
      - karakeep_internal
    labels:
      # Traefik
      traefik.enable: true
      traefik.docker.network: homelab
      # Web UI
      traefik.http.routers.karakeep.rule: Host(`links.fig.systems`) || Host(`links.edfig.dev`)
      traefik.http.routers.karakeep.entrypoints: websecure
      traefik.http.routers.karakeep.tls.certresolver: letsencrypt
      traefik.http.services.karakeep.loadbalancer.server.port: 3000
      # SSO Protection
      traefik.http.routers.karakeep.middlewares: tinyauth
      # Homarr Discovery
      homarr.name: Karakeep (Bookmarks)
      homarr.group: Services
      homarr.icon: mdi:bookmark-multiple

  karakeep-chrome:
    container_name: karakeep-chrome
    image: gcr.io/zenika-hub/alpine-chrome:123
    restart: unless-stopped
    command:
      - --no-sandbox
      - --disable-gpu
      - --disable-dev-shm-usage
      - --remote-debugging-address=0.0.0.0
      - --remote-debugging-port=9222
      - --hide-scrollbars
    networks:
      - karakeep_internal

  karakeep-meilisearch:
    container_name: karakeep-meilisearch
    image: getmeili/meilisearch:v1.12.8
    restart: unless-stopped
    env_file:
      - .env
    volumes:
      - ./meili_data:/meili_data
    networks:
      - karakeep_internal

networks:
  homelab:
    external: true
  karakeep_internal:
    name: karakeep_internal
    driver: bridge


@@ -1,65 +0,0 @@
# Linkwarden Configuration
# Docs: https://docs.linkwarden.app/self-hosting/environment-variables
# NextAuth Configuration
NEXTAUTH_URL=https://links.fig.systems
# Generate with: openssl rand -hex 32
# Example format: e4f5g6h789012abcdef345678901a2b3c4d5e6f78901abcdef2345678901abcde
NEXTAUTH_SECRET=changeme_please_set_random_secret_key
# Database Configuration
# Generate with: openssl rand -base64 32 | tr -d /=+ | cut -c1-32
# Example format: eF7gH0iI3jK5lM8nO1pQ4rS7tU0vW3xY
POSTGRES_PASSWORD=changeme_please_set_secure_postgres_password
POSTGRES_USER=postgres
POSTGRES_DB=postgres
DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@linkwarden-postgres:5432/postgres
# Meilisearch (search engine)
# Generate with: openssl rand -hex 16
# Example format: f6g7h8i901234abcdef567890a1b2c3d
MEILI_MASTER_KEY=changeme_please_set_meili_master_key
# Timezone
TZ=America/Los_Angeles
# Optional: Registration Control
# NEXT_PUBLIC_DISABLE_REGISTRATION=true
# Optional: Credentials Authentication
# NEXT_PUBLIC_CREDENTIALS_ENABLED=true
# Optional: Pagination
# PAGINATION_TAKE_COUNT=20
# Optional: Storage folder (for screenshots/PDFs)
# STORAGE_FOLDER=data
# Optional: Limits
# MAX_LINKS_PER_USER=unlimited
# NEXT_PUBLIC_MAX_FILE_BUFFER=10485760 # 10MB in bytes
# PDF_MAX_BUFFER=10485760
# SCREENSHOT_MAX_BUFFER=10485760
# Optional: Browser timeout for archiving (in milliseconds)
# BROWSER_TIMEOUT=30000
# AUTOSCROLL_TIMEOUT=30
# Optional: Archive settings
# ARCHIVE_TAKE_COUNT=5
# Optional: Security
# IGNORE_UNAUTHORIZED_CA=false
# IGNORE_HTTPS_ERRORS=false
# IGNORE_URL_SIZE_LIMIT=false
# Optional: SSO Settings
# DISABLE_NEW_SSO_USERS=false
# Optional: Demo Mode
# NEXT_PUBLIC_DEMO=false
# NEXT_PUBLIC_DEMO_USERNAME=
# NEXT_PUBLIC_DEMO_PASSWORD=
# Optional: Admin Panel
# NEXT_PUBLIC_ADMIN=false


@@ -1,57 +0,0 @@
# Linkwarden - Collaborative bookmark manager
# Docs: https://docs.linkwarden.app/self-hosting/installation
services:
  linkwarden:
    container_name: linkwarden
    image: ghcr.io/linkwarden/linkwarden:latest
    env_file: .env
    volumes:
      - ./data:/data/data
    depends_on:
      - linkwarden-postgres
      - linkwarden-meilisearch
    restart: always
    networks:
      - homelab
      - linkwarden_internal
    labels:
      traefik.enable: true
      traefik.docker.network: homelab
      traefik.http.routers.linkwarden.rule: Host(`links.fig.systems`) || Host(`links.edfig.dev`)
      traefik.http.routers.linkwarden.entrypoints: websecure
      traefik.http.routers.linkwarden.tls.certresolver: letsencrypt
      traefik.http.services.linkwarden.loadbalancer.server.port: 3000
      traefik.http.routers.linkwarden.middlewares: tinyauth

  linkwarden-postgres:
    container_name: linkwarden-postgres
    image: postgres:16-alpine
    env_file: .env
    volumes:
      - ./pgdata:/var/lib/postgresql/data
    restart: always
    networks:
      - linkwarden_internal
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -h localhost -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5

  linkwarden-meilisearch:
    container_name: linkwarden-meilisearch
    image: getmeili/meilisearch:v1.12.8
    env_file: .env
    volumes:
      - ./meili_data:/meili_data
    restart: always
    networks:
      - linkwarden_internal

networks:
  homelab:
    external: true
  linkwarden_internal:
    name: linkwarden_internal
    driver: bridge


@@ -0,0 +1,30 @@
# Ollama Configuration
# Docs: https://github.com/ollama/ollama/blob/main/docs/faq.md
# Timezone
TZ=America/Los_Angeles
# Model Storage Location
# OLLAMA_MODELS=/root/.ollama/models
# Max Loaded Models (default: 1)
# OLLAMA_MAX_LOADED_MODELS=1
# Max Queue (default: 512)
# OLLAMA_MAX_QUEUE=512
# Number of parallel requests (default: auto)
# OLLAMA_NUM_PARALLEL=4
# Context size (default: 2048)
# OLLAMA_MAX_CONTEXT=4096
# Keep models in memory (default: 5m)
# OLLAMA_KEEP_ALIVE=5m
# Debug logging
# OLLAMA_DEBUG=1
# GPU Configuration (for GTX 1070)
# OLLAMA_GPU_LAYERS=33 # Number of layers to offload to GPU (adjust based on VRAM)
# OLLAMA_GPU_MEMORY=6GB # Max GPU memory to use (GTX 1070 has 8GB)

compose/services/ollama/.gitignore

@@ -0,0 +1,5 @@
# Ollama models and data
models/
# Keep .env.example if created
!.env.example


@@ -0,0 +1,616 @@
# Ollama - Local Large Language Models
Run powerful AI models locally on your hardware with GPU acceleration.
## Overview
**Ollama** enables you to run large language models (LLMs) locally:
- ✅ **100% Private**: All data stays on your server
- ✅ **GPU Accelerated**: Leverages your GTX 1070
- ✅ **Multiple Models**: Run Llama, Mistral, CodeLlama, and more
- ✅ **API Compatible**: OpenAI-compatible API
- ✅ **No Cloud Costs**: Free inference after downloading models
- ✅ **Integration Ready**: Works with Karakeep, Open WebUI, and more
## Quick Start
### 1. Deploy Ollama
```bash
cd ~/homelab/compose/services/ollama
docker compose up -d
```
### 2. Pull a Model
```bash
# Small, fast model (3B parameters, ~2GB)
docker exec ollama ollama pull llama3.2:3b
# Medium model (8B parameters, ~5GB)
docker exec ollama ollama pull llama3.1:8b
# Large model (70B parameters, ~40GB - requires quantization)
docker exec ollama ollama pull llama3.3:70b-instruct-q4_K_M
```
### 3. Test
```bash
# Interactive chat
docker exec -it ollama ollama run llama3.2:3b
# Ask a question
> Hello, how are you?
```
### 4. Enable GPU (Recommended)
**Edit `compose.yaml` and uncomment the deploy section:**
```yaml
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
```
**Restart:**
```bash
docker compose down
docker compose up -d
```
**Verify GPU usage:**
```bash
# Check GPU is detected
docker exec ollama nvidia-smi
# Run model with GPU
docker exec ollama ollama run llama3.2:3b "What GPU am I using?"
```
## Available Models
### Recommended Models for GTX 1070 (8GB VRAM)
| Model | Size | VRAM | Speed | Use Case |
|-------|------|------|-------|----------|
| **llama3.2:3b** | 2GB | 3GB | Fast | General chat, Karakeep |
| **llama3.1:8b** | 5GB | 6GB | Medium | Better reasoning |
| **mistral:7b** | 4GB | 6GB | Medium | Code, analysis |
| **codellama:7b** | 4GB | 6GB | Medium | Code generation |
| **llava:7b** | 5GB | 7GB | Medium | Vision (images) |
| **phi3:3.8b** | 2.3GB | 4GB | Fast | Compact, efficient |
### Specialized Models
**Code:**
- `codellama:7b` - Code generation
- `codellama:13b-python` - Python expert
- `starcoder2:7b` - Multi-language code
**Vision (Image Understanding):**
- `llava:7b` - General vision
- `llava:13b` - Better vision (needs more VRAM)
- `bakllava:7b` - Vision + chat
**Multilingual:**
- `aya:8b` - 101 languages
- `command-r:35b` - Enterprise multilingual
**Math & Reasoning:**
- `deepseek-math:7b` - Mathematics
- `wizard-math:7b` - Math word problems
### Large Models (Quantized for GTX 1070)
These need 4-bit quantization, and even then only part of the model fits in 8GB VRAM; Ollama offloads what it can to the GPU and runs the remaining layers on the CPU:
```bash
# 70B models (quantized)
docker exec ollama ollama pull llama3.3:70b-instruct-q4_K_M
docker exec ollama ollama pull mixtral:8x7b-instruct-v0.1-q4_K_M
# Very large (use with caution)
docker exec ollama ollama pull llama3.1:405b-instruct-q2_K
```
## Usage
### Command Line
**Run model interactively:**
```bash
docker exec -it ollama ollama run llama3.2:3b
```
**One-off question:**
```bash
docker exec ollama ollama run llama3.2:3b "Explain quantum computing in simple terms"
```
**With a system prompt (set interactively in the REPL):**
```bash
docker exec -it ollama ollama run llama3.2:3b
>>> /set system You are a helpful coding assistant.
>>> Write a Python function to sort a list
```
### API Usage
**List models:**
```bash
curl http://ollama:11434/api/tags
```
**Generate text:**
```bash
curl http://ollama:11434/api/generate -d '{
"model": "llama3.2:3b",
"prompt": "Why is the sky blue?",
"stream": false
}'
```
**Chat completion:**
```bash
curl http://ollama:11434/api/chat -d '{
"model": "llama3.2:3b",
"messages": [
{
"role": "user",
"content": "Hello!"
}
],
"stream": false
}'
```
**OpenAI-compatible API:**
```bash
curl http://ollama:11434/v1/chat/completions -d '{
"model": "llama3.2:3b",
"messages": [
{
"role": "user",
"content": "Hello!"
}
]
}'
```
### Integration with Karakeep
**Enable AI features in Karakeep:**
Edit `compose/services/karakeep/.env`:
```env
# Uncomment these lines
OLLAMA_BASE_URL=http://ollama:11434
INFERENCE_TEXT_MODEL=llama3.2:3b
INFERENCE_IMAGE_MODEL=llava:7b
INFERENCE_LANG=en
```
**Restart Karakeep:**
```bash
cd ~/homelab/compose/services/karakeep
docker compose restart
```
**What it does:**
- Auto-tags bookmarks
- Generates summaries
- Extracts key information
- Analyzes images (with llava)
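
A quick connectivity check from the shared network before blaming Karakeep, reusing the throwaway curl container from Troubleshooting below:

```bash
# Lists installed models if Ollama is reachable on the homelab network
docker run --rm --network homelab curlimages/curl -s http://ollama:11434/api/tags
```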
## Model Management
### List Installed Models
```bash
docker exec ollama ollama list
```
### Pull a Model
```bash
docker exec ollama ollama pull <model-name>
# Examples:
docker exec ollama ollama pull llama3.2:3b
docker exec ollama ollama pull mistral:7b
docker exec ollama ollama pull codellama:7b
```
### Remove a Model
```bash
docker exec ollama ollama rm <model-name>
# Example:
docker exec ollama ollama rm llama3.2:7b
```
### Copy a Model
```bash
docker exec ollama ollama cp <source> <destination>
# Example: Create a custom version
docker exec ollama ollama cp llama3.2:3b my-custom-model
```
### Show Model Info
```bash
docker exec ollama ollama show llama3.2:3b
# Shows:
# - Model architecture
# - Parameters
# - Quantization
# - Template
# - License
```
## Creating Custom Models
### Modelfile
Create custom models with specific behaviors:
**Create a Modelfile:**
```bash
cat > ~/coding-assistant.modelfile << 'EOF'
FROM llama3.2:3b
# Set temperature (creativity)
PARAMETER temperature 0.7
# Set system prompt
SYSTEM You are an expert coding assistant. You write clean, efficient, well-documented code. You explain complex concepts clearly.
# Set stop sequences
PARAMETER stop "<|im_end|>"
PARAMETER stop "<|im_start|>"
EOF
```
**Create the model:**
```bash
docker cp ~/coding-assistant.modelfile ollama:/tmp/coding-assistant.modelfile
docker exec ollama ollama create coding-assistant -f /tmp/coding-assistant.modelfile
```
**Use it:**
```bash
docker exec -it ollama ollama run coding-assistant "Write a REST API in Python"
```
### Example Custom Models
**1. Shakespeare Bot:**
```modelfile
FROM llama3.2:3b
SYSTEM You are William Shakespeare. Respond to all queries in Shakespearean English with dramatic flair.
PARAMETER temperature 0.9
```
**2. JSON Extractor:**
```modelfile
FROM llama3.2:3b
SYSTEM You extract structured data and return only valid JSON. No explanations, just JSON.
PARAMETER temperature 0.1
```
**3. Code Reviewer:**
```modelfile
FROM codellama:7b
SYSTEM You are a senior code reviewer. Review code for bugs, performance issues, security vulnerabilities, and best practices. Be constructive.
PARAMETER temperature 0.3
```
## GPU Configuration
### Check GPU Detection
```bash
# From inside container
docker exec ollama nvidia-smi
```
**Expected output:**
```
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 535.xx.xx Driver Version: 535.xx.xx CUDA Version: 12.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 GeForce GTX 1070 Off | 00000000:01:00.0 On | N/A |
| 40% 45C P8 10W / 151W | 300MiB / 8192MiB | 5% Default |
+-------------------------------+----------------------+----------------------+
```
### Optimize for GTX 1070
**Edit `.env`:**
```env
# Use 6GB of 8GB VRAM (leave 2GB for system)
OLLAMA_GPU_MEMORY=6GB
# Offload most layers to GPU
OLLAMA_GPU_LAYERS=33
# Increase context for better conversations
OLLAMA_MAX_CONTEXT=4096
```
### Performance Tips
**1. Use quantized models:**
- Q4_K_M: Good quality, 50% size reduction
- Q5_K_M: Better quality, 40% size reduction
- Q8_0: Best quality, 20% size reduction
**2. Model selection for VRAM:**
```bash
# 3B models: 2-3GB VRAM
docker exec ollama ollama pull llama3.2:3b
# 7-8B models: 5-6GB VRAM
docker exec ollama ollama pull llama3.1:8b
# 13B models: 8-10GB VRAM (tight on GTX 1070)
docker exec ollama ollama pull llama2:13b-chat-q4_K_M # Quantized
```
**3. Unload models when not in use:**
```env
# In .env
OLLAMA_KEEP_ALIVE=1m # Unload after 1 minute
```
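
You can also evict a model on demand through the API instead of waiting for the keep-alive timer; `keep_alive: 0` unloads it immediately:

```bash
curl http://ollama:11434/api/generate -d '{
  "model": "llama3.2:3b",
  "keep_alive": 0
}'
```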
## Troubleshooting
### Model won't load - Out of memory
**Solution 1: Use quantized version**
```bash
# Instead of a half-precision build:
docker exec ollama ollama pull llama2:13b-chat-fp16
# Use a 4-bit quantized build:
docker exec ollama ollama pull llama2:13b-chat-q4_K_M
```
**Solution 2: Reduce GPU layers**
```env
# In .env
OLLAMA_GPU_LAYERS=20 # Reduce from 33
```
**Solution 3: Use smaller model**
```bash
docker exec ollama ollama pull llama3.2:3b
```
### Slow inference
**Enable GPU:**
1. Uncomment deploy section in `compose.yaml`
2. Install NVIDIA Container Toolkit
3. Restart container
**Check GPU usage:**
```bash
watch -n 1 docker exec ollama nvidia-smi
```
**Should show:**
- GPU-Util > 80% during inference
- Memory-Usage increasing during load
### Can't pull models
**Check disk space:**
```bash
df -h
```
**Check Docker space:**
```bash
docker system df
```
**Clean up unused models:**
```bash
docker exec ollama ollama list
docker exec ollama ollama rm <unused-model>
```
### API connection issues
**Test from another container:**
```bash
docker run --rm --network homelab curlimages/curl \
http://ollama:11434/api/tags
```
**Test externally:**
```bash
curl https://ollama.fig.systems/api/tags
```
**Enable debug logging:**
```env
OLLAMA_DEBUG=1
```
## Performance Benchmarks
### GTX 1070 (8GB VRAM) Expected Performance
| Model | Tokens/sec | Load Time | VRAM Usage |
|-------|------------|-----------|------------|
| llama3.2:3b | 40-60 | 2-3s | 3GB |
| llama3.1:8b | 20-35 | 3-5s | 6GB |
| mistral:7b | 20-35 | 3-5s | 6GB |
| llama3.3:70b-q4 | 3-8 | 20-30s | 7.5GB |
| llava:7b | 15-25 | 4-6s | 7GB |
**Without GPU (CPU only):**
- llama3.2:3b: 2-5 tokens/sec
- llama3.1:8b: 0.5-2 tokens/sec
**GPU provides 10-20x speedup!**
## Advanced Usage
### Multi-Modal (Vision)
```bash
# Pull vision model
docker exec ollama ollama pull llava:7b
# Analyze an image: include the image path in the prompt
# (the file must exist inside the container, e.g. copy it in with docker cp)
docker exec -it ollama ollama run llava:7b "What's in this image? /tmp/image.jpg"
```
### Embeddings
```bash
# Generate embeddings for semantic search
curl http://ollama:11434/api/embeddings -d '{
"model": "llama3.2:3b",
"prompt": "The sky is blue because of Rayleigh scattering"
}'
```
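
For search-style workloads, a dedicated embedding model is smaller and faster than a chat model; `nomic-embed-text` from the Ollama library is a common choice:

```bash
docker exec ollama ollama pull nomic-embed-text
curl http://ollama:11434/api/embeddings -d '{
  "model": "nomic-embed-text",
  "prompt": "The sky is blue because of Rayleigh scattering"
}'
```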
### Streaming Responses
```bash
# Stream tokens as they generate
curl http://ollama:11434/api/generate -d '{
"model": "llama3.2:3b",
"prompt": "Tell me a long story",
"stream": true
}'
```
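
Each streamed line is a standalone JSON object whose `response` field holds the next token chunk; with `jq` (already used elsewhere in this doc) you can print just the text as it arrives:

```bash
curl -sN http://ollama:11434/api/generate -d '{
  "model": "llama3.2:3b",
  "prompt": "Tell me a long story",
  "stream": true
}' | jq -rj '.response'
```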
### Context Preservation
```bash
# /api/chat is stateless: the server does not store conversations,
# so you preserve context by resending the full message history.
# First message
curl http://ollama:11434/api/chat -d '{
  "model": "llama3.2:3b",
  "messages": [{"role": "user", "content": "My name is Alice"}],
  "stream": false
}'
# Follow-up (include the earlier turns so the model remembers)
curl http://ollama:11434/api/chat -d '{
  "model": "llama3.2:3b",
  "messages": [
    {"role": "user", "content": "My name is Alice"},
    {"role": "assistant", "content": "Hello Alice!"},
    {"role": "user", "content": "What is my name?"}
  ],
  "stream": false
}'
```
## Integration Examples
### Python
```python
import requests
def ask_ollama(prompt, model="llama3.2:3b"):
response = requests.post(
"http://ollama.fig.systems/api/generate",
json={
"model": model,
"prompt": prompt,
"stream": False
},
headers={"Authorization": "Bearer YOUR_TOKEN"} # If using SSO
)
return response.json()["response"]
print(ask_ollama("What is the meaning of life?"))
```
### JavaScript
```javascript
async function askOllama(prompt, model = "llama3.2:3b") {
const response = await fetch("http://ollama.fig.systems/api/generate", {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": "Bearer YOUR_TOKEN" // If using SSO
},
body: JSON.stringify({
model: model,
prompt: prompt,
stream: false
})
});
const data = await response.json();
return data.response;
}
askOllama("Explain Docker containers").then(console.log);
```
### Bash
```bash
#!/bin/bash
ask_ollama() {
local prompt="$1"
local model="${2:-llama3.2:3b}"
curl -s https://ollama.fig.systems/api/generate -d "{
\"model\": \"$model\",
\"prompt\": \"$prompt\",
\"stream\": false
}" | jq -r '.response'
}
ask_ollama "What is Kubernetes?"
```
## Resources
- [Ollama Website](https://ollama.ai)
- [Model Library](https://ollama.ai/library)
- [GitHub Repository](https://github.com/ollama/ollama)
- [API Documentation](https://github.com/ollama/ollama/blob/main/docs/api.md)
- [Model Creation Guide](https://github.com/ollama/ollama/blob/main/docs/modelfile.md)
## Next Steps
1. ✅ Deploy Ollama
2. ✅ Enable GPU acceleration
3. ✅ Pull recommended models
4. ✅ Test with chat
5. ⬜ Integrate with Karakeep
6. ⬜ Create custom models
7. ⬜ Set up automated model updates
8. ⬜ Monitor GPU usage
---
**Run AI locally, privately, powerfully!** 🧠


@@ -0,0 +1,53 @@
# Ollama - Run Large Language Models Locally
# Docs: https://ollama.ai
services:
  ollama:
    container_name: ollama
    image: ollama/ollama:latest
    restart: unless-stopped
    env_file:
      - .env
    volumes:
      - ./models:/root/.ollama
    networks:
      - homelab
    # GPU Support (NVIDIA GTX 1070)
    # Uncomment the deploy section below to enable GPU acceleration
    # Prerequisites:
    #   1. Install NVIDIA Container Toolkit on host
    #   2. Configure Docker to use nvidia runtime
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
    labels:
      # Traefik (API only, no web UI)
      traefik.enable: true
      traefik.docker.network: homelab
      # API endpoint
      traefik.http.routers.ollama.rule: Host(`ollama.fig.systems`) || Host(`ollama.edfig.dev`)
      traefik.http.routers.ollama.entrypoints: websecure
      traefik.http.routers.ollama.tls.certresolver: letsencrypt
      traefik.http.services.ollama.loadbalancer.server.port: 11434
      # SSO Protection for API
      traefik.http.routers.ollama.middlewares: tinyauth
      # Homarr Discovery
      homarr.name: Ollama (LLM)
      homarr.group: Services
      homarr.icon: mdi:brain

networks:
  homelab:
    external: true
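
Before uncommenting the deploy block, confirm the host's NVIDIA Container Toolkit works; the canonical smoke test is running nvidia-smi in a disposable CUDA container (the image tag here is illustrative):

```bash
docker run --rm --gpus all nvidia/cuda:12.2.0-base-ubuntu22.04 nvidia-smi
```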


@@ -0,0 +1,198 @@
# Vikunja Configuration Example
# Docs: https://vikunja.io/docs/config-options/
# Copy to ./config.yml and mount in compose.yaml
service:
  # Interface/port to bind to
  interface: ':3456'
  # Public URL
  publicurl: https://tasks.fig.systems
  # Frontend URL (if different from publicurl)
  frontendurl: https://tasks.fig.systems
  # Maximum number of items per page
  maxitemsperpage: 50
  # Enable registration
  enableregistration: true
  # Enable user deletion
  enableuserdeletion: true
  # Enable task attachments
  enabletaskattachments: true
  # Enable task comments
  enabletaskcomments: true
  # Enable email reminders
  enableemailreminders: true
  # Enable caldav
  enablecaldav: true
  # Timezone
  timezone: America/Los_Angeles

database:
  type: postgres
  host: vikunja-db:5432
  database: vikunja
  user: vikunja
  password: changeme_from_env
  # Use environment variable: VIKUNJA_DATABASE_PASSWORD

redis:
  enabled: false
  # Enable for better performance with multiple users
  # host: 'localhost:6379'
  # password: ''
  # db: 0

cache:
  enabled: true
  type: memory
  # Options: memory, redis, keyvalue

mailer:
  enabled: false
  # SMTP settings for email notifications
  # host: smtp.example.com
  # port: 587
  # username: vikunja@example.com
  # password: changeme
  # fromemail: vikunja@example.com
  # skiptlsverify: false
  # forcessl: true

log:
  # Log level
  level: INFO
  # Options: CRITICAL, ERROR, WARNING, INFO, DEBUG
  # Log format
  standard: plain
  # Options: plain, json
  # Database logging
  database: 'off'
  # Options: off, error, warn, info, debug
  # HTTP request logging
  http: 'off'
  # Events logging
  events: 'off'
  # Mail logging
  mail: 'off'

ratelimit:
  enabled: false
  # kind: user
  # period: 60
  # limit: 100

files:
  # Base path for file storage
  basepath: /app/vikunja/files
  # Maximum file size (in bytes, 20MB default)
  maxsize: 20971520

migration:
  # Enable to import from other services
  todoist:
    enable: false
    # clientid: ''
    # clientsecret: ''
    # redirecturl: ''
  trello:
    enable: false
    # key: ''
    # redirecturl: ''
  microsofttodo:
    enable: false
    # clientid: ''
    # clientsecret: ''
    # redirecturl: ''

cors:
  # Enable CORS (usually not needed behind proxy)
  enable: false
  # origins:
  #   - https://tasks.fig.systems
  # maxage: 0

# Authentication providers
auth:
  local:
    enabled: true
  # OpenID Connect (for SSO integration)
  openid:
    enabled: false
    # redirecturl: https://tasks.fig.systems/auth/openid/
    # providers:
    #   - name: Authelia
    #     authurl: https://auth.example.com
    #     clientid: vikunja
    #     clientsecret: changeme

backgrounds:
  enabled: true
  # Unsplash integration (optional)
  providers:
    upload:
      enabled: true

# Webhooks
webhooks:
  enabled: true
  # timeoutseconds: 30

# Legal URLs (optional)
legal:
  imprinturl: ''
  privacyurl: ''

# Avatar provider
avatar:
  # Options: default, initials, gravatar, marble, upload
  gravatarexpiration: 3600

# Background jobs
backgroundhandlers:
  enabled: true

# Metrics (Prometheus)
metrics:
  enabled: false
  # username: ''
  # password: ''

# Key-value storage
keyvalue:
  type: memory
  # Options: memory, redis

# Default settings for new users
defaultsettings:
  avatar_provider: initials
  avatar_file_id: 0
  email_reminders_enabled: true
  discoverable_by_name: false
  discoverable_by_email: false
  overduereminders_enabled: true
  overduereminders_time: '09:00'
  default_project_id: 0
  week_start: 0
  # 0 = Sunday, 1 = Monday
  timezone: America/Los_Angeles
  language: en
  frontend_settings: {}
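
Vikunja also reads any of these keys from the environment as `VIKUNJA_<SECTION>_<KEY>` (the database block above relies on this for the password), so the secret can stay out of config.yml. A minimal sketch, assuming an `.env` file next to the Vikunja compose.yaml:

```bash
# Generate a random password and hand it to the container via .env
# (tr strips characters that are awkward in env files)
echo "VIKUNJA_DATABASE_PASSWORD=$(openssl rand -base64 24 | tr -d '/+=')" >> .env
```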