Sync from upstream llama.cpp repository
70
tools/server/CMakeLists.txt
Normal file
@@ -0,0 +1,70 @@
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
|
||||
|
||||
# server-context containing the core server logic, used by llama-server and CLI
|
||||
|
||||
set(TARGET server-context)
|
||||
|
||||
add_library(${TARGET} STATIC
|
||||
server-task.cpp
|
||||
server-task.h
|
||||
server-queue.cpp
|
||||
server-queue.h
|
||||
server-common.cpp
|
||||
server-common.h
|
||||
server-context.cpp
|
||||
server-context.h
|
||||
)
|
||||
|
||||
if (BUILD_SHARED_LIBS)
|
||||
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
||||
endif()
|
||||
|
||||
target_include_directories(${TARGET} PRIVATE ../mtmd)
|
||||
target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR})
|
||||
target_link_libraries(${TARGET} PUBLIC common mtmd ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
||||
|
||||
# llama-server executable
|
||||
|
||||
set(TARGET llama-server)
|
||||
|
||||
if (NOT LLAMA_HTTPLIB)
|
||||
message(FATAL_ERROR "LLAMA_HTTPLIB is OFF, cannot build llama-server. Hint: to skip building server, set -DLLAMA_BUILD_SERVER=OFF")
|
||||
endif()
|
||||
|
||||
set(TARGET_SRCS
|
||||
server.cpp
|
||||
server-http.cpp
|
||||
server-http.h
|
||||
server-models.cpp
|
||||
server-models.h
|
||||
)
|
||||
set(PUBLIC_ASSETS
|
||||
index.html.gz
|
||||
loading.html
|
||||
)
|
||||
|
||||
foreach(asset ${PUBLIC_ASSETS})
|
||||
set(input "${CMAKE_CURRENT_SOURCE_DIR}/public/${asset}")
|
||||
set(output "${CMAKE_CURRENT_BINARY_DIR}/${asset}.hpp")
|
||||
list(APPEND TARGET_SRCS ${output})
|
||||
add_custom_command(
|
||||
DEPENDS "${input}"
|
||||
OUTPUT "${output}"
|
||||
COMMAND "${CMAKE_COMMAND}" "-DINPUT=${input}" "-DOUTPUT=${output}" -P "${PROJECT_SOURCE_DIR}/scripts/xxd.cmake"
|
||||
)
|
||||
set_source_files_properties(${output} PROPERTIES GENERATED TRUE)
|
||||
endforeach()
|
||||
|
||||
add_executable(${TARGET} ${TARGET_SRCS})
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
|
||||
target_include_directories(${TARGET} PRIVATE ../mtmd)
|
||||
target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR})
|
||||
target_link_libraries(${TARGET} PRIVATE server-context PUBLIC common cpp-httplib ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
||||
if (WIN32)
|
||||
TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
|
||||
endif()
|
||||
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
||||
179
tools/server/README-dev.md
Normal file
@@ -0,0 +1,179 @@
|
||||
# llama-server Development Documentation
|
||||
|
||||
This document provides an in-depth technical overview of `llama-server`, intended for maintainers and contributors.
|
||||
|
||||
If you are an end user consuming `llama-server` as a product, please refer to the main [README](./README.md) instead.
|
||||
|
||||
## Backend
|
||||
|
||||
### Overview
|
||||
|
||||
The server supports two primary operating modes:
|
||||
|
||||
- **Inference mode**: The default mode for performing inference with a single loaded GGUF model.
|
||||
- **Router mode**: Enables management of multiple inference server instances behind a single API endpoint. Requests are automatically routed to the appropriate backend instance based on the requested model.
|
||||
|
||||
The core architecture consists of the following components:
|
||||
|
||||
- `server_context`: Holds the primary inference state, including the main `llama_context` and all active slots.
|
||||
- `server_slot`: An abstraction over a single “sequence” in llama.cpp, responsible for managing individual parallel inference requests.
|
||||
- `server_routes`: Middleware layer between `server_context` and the HTTP interface; handles JSON parsing/formatting and request routing logic.
|
||||
- `server_http_context`: Implements the HTTP server using `cpp-httplib`.
|
||||
- `server_queue`: Thread-safe queue used by HTTP workers to submit new tasks to `server_context`.
|
||||
- `server_response`: Thread-safe queue used by `server_context` to return results to HTTP workers.
|
||||
- `server_response_reader`: Higher-level wrapper around the two queues above for cleaner code.
|
||||
- `server_task`: Unit of work pushed into `server_queue`.
|
||||
- `server_task_result`: Unit of result pushed into `server_response`.
|
||||
- `server_tokens`: Unified representation of token sequences (supports both text and multimodal tokens); used by `server_task` and `server_slot`.
|
||||
- `server_prompt_checkpoint`: For recurrent (e.g., RWKV) and SWA models, stores snapshots of KV cache state. Enables reuse when subsequent requests share the same prompt prefix, saving redundant computation.
|
||||
- `server_models`: Standalone component for managing multiple backend instances (used in router mode). It is completely independent of `server_context`.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
API_User <--> server_http_context
|
||||
server_http_context <-- router mode --> server_models
|
||||
server_http_context <-- inference mode --> server_routes
|
||||
server_routes -- server_task --> server_queue
|
||||
subgraph server_context
|
||||
server_queue --> server_slot
|
||||
server_slot -- server_task_result --> server_response
|
||||
server_slot[multiple server_slot]
|
||||
end
|
||||
server_response --> server_routes
|
||||
```
|
||||
|
||||
### Batching
|
||||
|
||||
The server context maintains a single batch shared across all slots. When `update_slots()` is invoked, the system iterates through all active slots to populate this batch. For each slot, either a generated token from the previous decoding step or available prompt tokens are added to the batch.
|
||||
|
||||
Batching constraints apply: slots can only be batched together if they share compatible configurations. For instance, slots using a specific LoRA adapter can be batched with each other, but not with slots using a different LoRA adapter or no adapter at all.
|
||||
|
||||
Once the batch reaches capacity or all slots have been processed, `llama_decode` is called to execute the inference. This operation represents the primary computational bottleneck in `update_slots()`.
|
||||
|
||||
Following decoding, the system either retrieves embeddings or samples the next token using `common_sampler_sample`. If a slot has remaining prompt tokens to process, it yields until the next `update_slots()` iteration.
|
||||
|
||||
### Thread Management
|
||||
|
||||
`server_context` runs on a dedicated single thread. Because it is single-threaded, heavy post-processing (especially after token generation) should be avoided, as it directly impacts multi-sequence throughput.
|
||||
|
||||
Each incoming HTTP request is handled by its own thread managed by the HTTP library. The following operations are performed in HTTP worker threads:
|
||||
|
||||
- JSON request parsing
|
||||
- Chat template application
|
||||
- Tokenization
|
||||
- Conversion of `server_task_result` into final JSON response
|
||||
- Error formatting into JSON
|
||||
- Tracking of partial/incremental responses (e.g., streaming tool calls or reasoning steps)
|
||||
|
||||
**Best practices to follow:**
|
||||
|
||||
- All JSON formatting and chat template logic must stay in the HTTP layer.
|
||||
- Avoid passing raw JSON between the HTTP layer and `server_slot`. Instead, parse everything into native C++ types as early as possible.
|
||||
|
||||
### Example trace of a request
|
||||
|
||||
Here is an example trace of an API request for text completion:
|
||||
|
||||
- A request arrives at the HTTP layer.
|
||||
- The request is routed to the corresponding handler inside `server_routes`. In this case, `handle_completions_impl` is invoked.
|
||||
- The handler parses the input request, constructs a new `server_task`, and passes it to `server_res_generator`.
|
||||
- `server_res_generator` creates a new `task_result_state` for each task:
|
||||
- `task_result_state` stays in the HTTP layer, responsible for keeping track of the current state of the response (e.g., parsing tool calls or thinking messages).
|
||||
- `server_task` is moved into `server_queue` inside `server_context`.
|
||||
- `server_context` launches the task by moving it into an available slot (see `launch_slot_with_task()`).
|
||||
- `update_slot()` processes the task as described in the "Batching" section above.
|
||||
- Results may be sent using `send_partial_response` or `send_final_response`, which creates a new `server_task_result` and pushes it to the response queue.
|
||||
- At the same time, `server_res_generator` listens to the response queue and retrieves this response.
|
||||
- As the response is stateless, `server_res_generator` calls `response->update()` to update the response with the current state.
|
||||
- `server_res_generator` then calls `response->to_json()` and passes the response to the HTTP layer.
|
||||
|
||||
### Testing
|
||||
|
||||
`llama-server` includes an automated test suite based on `pytest`.
|
||||
|
||||
The framework automatically starts a `llama-server` instance, sends requests, and validates responses.
|
||||
|
||||
For detailed instructions, see the [test documentation](./tests/README.md).
|
||||
|
||||
### Notable Related PRs
|
||||
|
||||
- Initial server implementation: https://github.com/ggml-org/llama.cpp/pull/1443
|
||||
- Parallel decoding support: https://github.com/ggml-org/llama.cpp/pull/3228
|
||||
- Refactor introducing `server_queue` and `server_response`: https://github.com/ggml-org/llama.cpp/pull/5065
|
||||
- Reranking endpoint: https://github.com/ggml-org/llama.cpp/pull/9510
|
||||
- Multimodal model support (`libmtmd`): https://github.com/ggml-org/llama.cpp/pull/12898
|
||||
- Unified KV cache handling: https://github.com/ggml-org/llama.cpp/pull/16736
|
||||
- Separation of HTTP logic into dedicated files: https://github.com/ggml-org/llama.cpp/pull/17216
|
||||
- Large-scale code base split into smaller files: https://github.com/ggml-org/llama.cpp/pull/17362
|
||||
- Introduction of router mode: https://github.com/ggml-org/llama.cpp/pull/17470
|
||||
- Speculative decoding: https://github.com/ggml-org/llama.cpp/pull/17808 and rework in https://github.com/ggml-org/llama.cpp/pull/17808
|
||||
- INI presets: https://github.com/ggml-org/llama.cpp/pull/17859 (+ refactoring: https://github.com/ggml-org/llama.cpp/pull/18169)
|
||||
- Sleeping mode: https://github.com/ggml-org/llama.cpp/pull/18228
|
||||
|
||||
|
||||
|
||||
|
||||
## Web UI
|
||||
|
||||
The project includes a web-based user interface for interacting with `llama-server`. It supports both single-model (`MODEL` mode) and multi-model (`ROUTER` mode) operation.
|
||||
|
||||
The SvelteKit-based Web UI is introduced in this PR: https://github.com/ggml-org/llama.cpp/pull/14839
|
||||
|
||||
### Features
|
||||
|
||||
- **Chat interface** with streaming responses
|
||||
- **Multi-model support** (ROUTER mode) - switch between models, auto-load on selection
|
||||
- **Modality validation** - ensures selected model supports conversation's attachments (images, audio)
|
||||
- **Conversation management** - branching, regeneration, editing with history preservation
|
||||
- **Attachment support** - images, audio, PDFs (with vision/text fallback)
|
||||
- **Configurable parameters** - temperature, top_p, etc. synced with server defaults
|
||||
- **Dark/light theme**
|
||||
|
||||
### Tech Stack
|
||||
|
||||
- **SvelteKit** - frontend framework with Svelte 5 runes for reactive state
|
||||
- **TailwindCSS** + **shadcn-svelte** - styling and UI components
|
||||
- **Vite** - build tooling
|
||||
- **IndexedDB** (Dexie) - local storage for conversations
|
||||
- **LocalStorage** - user settings persistence
|
||||
|
||||
### Architecture
|
||||
|
||||
The WebUI follows a layered architecture:
|
||||
|
||||
```
|
||||
Routes → Components → Hooks → Stores → Services → Storage/API
|
||||
```
|
||||
|
||||
- **Stores** - reactive state management (`chatStore`, `conversationsStore`, `modelsStore`, `serverStore`, `settingsStore`)
|
||||
- **Services** - stateless API/database communication (`ChatService`, `ModelsService`, `PropsService`, `DatabaseService`)
|
||||
- **Hooks** - reusable logic (`useModelChangeValidation`, `useProcessingState`)
|
||||
|
||||
For detailed architecture diagrams, see [`tools/server/webui/docs/`](webui/docs/):
|
||||
|
||||
- `high-level-architecture.mmd` - full architecture with all modules
|
||||
- `high-level-architecture-simplified.mmd` - simplified overview
|
||||
- `data-flow-simplified-model-mode.mmd` - data flow for single-model mode
|
||||
- `data-flow-simplified-router-mode.mmd` - data flow for multi-model mode
|
||||
- `flows/*.mmd` - detailed per-domain flows (chat, conversations, models, etc.)
|
||||
|
||||
### Development
|
||||
|
||||
```sh
|
||||
# make sure you have Node.js installed
|
||||
cd tools/server/webui
|
||||
npm i
|
||||
|
||||
# run dev server (with hot reload)
|
||||
npm run dev
|
||||
|
||||
# run tests
|
||||
npm run test
|
||||
|
||||
# build production bundle
|
||||
npm run build
|
||||
```
|
||||
|
||||
After `public/index.html.gz` has been generated, rebuild `llama-server` as described in the [build](#build) section to include the updated UI.
|
||||
|
||||
**Note:** The Vite dev server automatically proxies API requests to `http://localhost:8080`. Make sure `llama-server` is running on that port during development.
|
||||
1730
tools/server/README.md
Normal file
119
tools/server/bench/README.md
Normal file
@@ -0,0 +1,119 @@
|
||||
### Server benchmark tools
|
||||
|
||||
Benchmark is using [k6](https://k6.io/).
|
||||
|
||||
##### Install k6 and sse extension
|
||||
|
||||
SSE is not supported by default in k6, you have to build k6 with the [xk6-sse](https://github.com/phymbert/xk6-sse) extension.
|
||||
|
||||
Example (assuming golang >= 1.21 is installed):
|
||||
```shell
|
||||
go install go.k6.io/xk6/cmd/xk6@latest
|
||||
$GOPATH/bin/xk6 build master \
|
||||
--with github.com/phymbert/xk6-sse
|
||||
```
|
||||
|
||||
#### Download a dataset
|
||||
|
||||
This dataset was originally proposed in [vLLM benchmarks](https://github.com/vllm-project/vllm/blob/main/benchmarks/README.md).
|
||||
|
||||
```shell
|
||||
wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json
|
||||
```
|
||||
|
||||
#### Download a model
|
||||
Example for PHI-2
|
||||
|
||||
```shell
|
||||
../../../scripts/hf.sh --repo ggml-org/models --file phi-2/ggml-model-q4_0.gguf
|
||||
```
|
||||
|
||||
#### Start the server
|
||||
The server must answer OAI Chat completion requests on `http://localhost:8080/v1` or according to the environment variable `SERVER_BENCH_URL`.
|
||||
|
||||
Example:
|
||||
```shell
|
||||
llama-server --host localhost --port 8080 \
|
||||
--model ggml-model-q4_0.gguf \
|
||||
--cont-batching \
|
||||
--metrics \
|
||||
--parallel 8 \
|
||||
--batch-size 512 \
|
||||
--ctx-size 4096 \
|
||||
-ngl 33
|
||||
```
|
||||
|
||||
#### Run the benchmark
|
||||
|
||||
For 500 chat completions request with 8 concurrent users during maximum 10 minutes, run:
|
||||
```shell
|
||||
./k6 run script.js --duration 10m --iterations 500 --vus 8
|
||||
```
|
||||
|
||||
The benchmark values can be overridden with:
|
||||
- `SERVER_BENCH_URL` server url prefix for chat completions, default `http://localhost:8080/v1`
|
||||
- `SERVER_BENCH_N_PROMPTS` total prompts to randomly select in the benchmark, default `480`
|
||||
- `SERVER_BENCH_MODEL_ALIAS` model alias to pass in the completion request, default `my-model`
|
||||
- `SERVER_BENCH_MAX_TOKENS` max tokens to predict, default: `512`
|
||||
- `SERVER_BENCH_DATASET` path to the benchmark dataset file
|
||||
- `SERVER_BENCH_MAX_PROMPT_TOKENS` maximum prompt tokens to filter out in the dataset: default `1024`
|
||||
- `SERVER_BENCH_MAX_CONTEXT` maximum context size of the completions request to filter out in the dataset: prompt + predicted tokens, default `2048`
|
||||
|
||||
Note: the local tokenizer is just a string space split, real number of tokens will differ.
|
||||
|
||||
Or with [k6 options](https://k6.io/docs/using-k6/k6-options/reference/):
|
||||
|
||||
```shell
|
||||
SERVER_BENCH_N_PROMPTS=500 k6 run script.js --duration 10m --iterations 500 --vus 8
|
||||
```
|
||||
|
||||
To [debug http request](https://k6.io/docs/using-k6/http-debugging/) use `--http-debug="full"`.
|
||||
|
||||
#### Metrics
|
||||
|
||||
Following metrics are available computed from the OAI chat completions response `usage`:
|
||||
- `llamacpp_tokens_second` Trend of `usage.total_tokens / request duration`
|
||||
- `llamacpp_prompt_tokens` Trend of `usage.prompt_tokens`
|
||||
- `llamacpp_prompt_tokens_total_counter` Counter of `usage.prompt_tokens`
|
||||
- `llamacpp_completion_tokens` Trend of `usage.completion_tokens`
|
||||
- `llamacpp_completion_tokens_total_counter` Counter of `usage.completion_tokens`
|
||||
- `llamacpp_completions_truncated_rate` Rate of completions truncated, i.e. if `finish_reason === 'length'`
|
||||
- `llamacpp_completions_stop_rate` Rate of completions stopped by the model, i.e. if `finish_reason === 'stop'`
|
||||
|
||||
The script will fail if too many completions are truncated, see `llamacpp_completions_truncated_rate`.
|
||||
|
||||
K6 metrics might be compared against [server metrics](../README.md), with:
|
||||
|
||||
```shell
|
||||
curl http://localhost:8080/metrics
|
||||
```
|
||||
|
||||
### Using the CI python script
|
||||
The `bench.py` script does several steps:
|
||||
- start the server
|
||||
- define good variable for k6
|
||||
- run k6 script
|
||||
- extract metrics from prometheus
|
||||
|
||||
It aims to be used in the CI, but you can run it manually:
|
||||
|
||||
```shell
|
||||
LLAMA_SERVER_BIN_PATH=../../../cmake-build-release/bin/llama-server python bench.py \
|
||||
--runner-label local \
|
||||
--name local \
|
||||
--branch `git rev-parse --abbrev-ref HEAD` \
|
||||
--commit `git rev-parse HEAD` \
|
||||
--scenario script.js \
|
||||
--duration 5m \
|
||||
--hf-repo ggml-org/models \
|
||||
--hf-file phi-2/ggml-model-q4_0.gguf \
|
||||
--model-path-prefix models \
|
||||
--parallel 4 \
|
||||
-ngl 33 \
|
||||
--batch-size 2048 \
|
||||
--ubatch-size 256 \
|
||||
--ctx-size 4096 \
|
||||
--n-prompts 200 \
|
||||
--max-prompt-tokens 256 \
|
||||
--max-tokens 256
|
||||
```
|
||||
322
tools/server/bench/bench.py
Normal file
@@ -0,0 +1,322 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from contextlib import closing
|
||||
from datetime import datetime
|
||||
|
||||
import matplotlib
|
||||
import matplotlib.dates
|
||||
import matplotlib.pyplot as plt
|
||||
import requests
|
||||
from statistics import mean
|
||||
|
||||
|
||||
def main(args_in: list[str] | None = None) -> None:
|
||||
parser = argparse.ArgumentParser(description="Start server benchmark scenario")
|
||||
parser.add_argument("--name", type=str, help="Bench name", required=True)
|
||||
parser.add_argument("--runner-label", type=str, help="Runner label", required=True)
|
||||
parser.add_argument("--branch", type=str, help="Branch name", default="detached")
|
||||
parser.add_argument("--commit", type=str, help="Commit name", default="dirty")
|
||||
parser.add_argument("--host", type=str, help="Server listen host", default="0.0.0.0")
|
||||
parser.add_argument("--port", type=int, help="Server listen host", default="8080")
|
||||
parser.add_argument("--model-path-prefix", type=str, help="Prefix where to store the model files", default="models")
|
||||
parser.add_argument("--n-prompts", type=int,
|
||||
help="SERVER_BENCH_N_PROMPTS: total prompts to randomly select in the benchmark", required=True)
|
||||
parser.add_argument("--max-prompt-tokens", type=int,
|
||||
help="SERVER_BENCH_MAX_PROMPT_TOKENS: maximum prompt tokens to filter out in the dataset",
|
||||
required=True)
|
||||
parser.add_argument("--max-tokens", type=int,
|
||||
help="SERVER_BENCH_MAX_CONTEXT: maximum context size of the completions request to filter out in the dataset: prompt + predicted tokens",
|
||||
required=True)
|
||||
parser.add_argument("--hf-repo", type=str, help="Hugging Face model repository", required=True)
|
||||
parser.add_argument("--hf-file", type=str, help="Hugging Face model file", required=True)
|
||||
parser.add_argument("-ngl", "--n-gpu-layers", type=int, help="layers to the GPU for computation", required=True)
|
||||
parser.add_argument("--ctx-size", type=int, help="Set the size of the prompt context", required=True)
|
||||
parser.add_argument("--parallel", type=int, help="Set the number of slots for process requests", required=True)
|
||||
parser.add_argument("--batch-size", type=int, help="Set the batch size for prompt processing", required=True)
|
||||
parser.add_argument("--ubatch-size", type=int, help="physical maximum batch size", required=True)
|
||||
parser.add_argument("--scenario", type=str, help="Scenario to run", required=True)
|
||||
parser.add_argument("--duration", type=str, help="Bench scenario", required=True)
|
||||
|
||||
args = parser.parse_args(args_in)
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
# Start the server and performance scenario
|
||||
try:
|
||||
server_process = start_server(args)
|
||||
except Exception:
|
||||
print("bench: server start error :")
|
||||
traceback.print_exc(file=sys.stdout)
|
||||
sys.exit(1)
|
||||
|
||||
# start the benchmark
|
||||
iterations = 0
|
||||
data = {}
|
||||
try:
|
||||
start_benchmark(args)
|
||||
|
||||
with open("results.github.env", 'w') as github_env:
|
||||
# parse output
|
||||
with open('k6-results.json', 'r') as bench_results:
|
||||
# Load JSON data from file
|
||||
data = json.load(bench_results)
|
||||
for metric_name in data['metrics']:
|
||||
for metric_metric in data['metrics'][metric_name]:
|
||||
value = data['metrics'][metric_name][metric_metric]
|
||||
if isinstance(value, float) or isinstance(value, int):
|
||||
value = round(value, 2)
|
||||
data['metrics'][metric_name][metric_metric]=value
|
||||
github_env.write(
|
||||
f"{escape_metric_name(metric_name)}_{escape_metric_name(metric_metric)}={value}\n")
|
||||
iterations = data['root_group']['checks']['success completion']['passes']
|
||||
|
||||
except Exception:
|
||||
print("bench: error :")
|
||||
traceback.print_exc(file=sys.stdout)
|
||||
|
||||
# Stop the server
|
||||
if server_process:
|
||||
try:
|
||||
print(f"bench: shutting down server pid={server_process.pid} ...")
|
||||
if os.name == 'nt':
|
||||
interrupt = signal.CTRL_C_EVENT
|
||||
else:
|
||||
interrupt = signal.SIGINT
|
||||
server_process.send_signal(interrupt)
|
||||
server_process.wait(0.5)
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
print(f"server still alive after 500ms, force-killing pid={server_process.pid} ...")
|
||||
server_process.kill() # SIGKILL
|
||||
server_process.wait()
|
||||
|
||||
while is_server_listening(args.host, args.port):
|
||||
time.sleep(0.1)
|
||||
|
||||
title = (f"llama.cpp {args.name} on {args.runner_label}\n "
|
||||
f"duration={args.duration} {iterations} iterations")
|
||||
xlabel = (f"{args.hf_repo}/{args.hf_file}\n"
|
||||
f"parallel={args.parallel} ctx-size={args.ctx_size} ngl={args.n_gpu_layers} batch-size={args.batch_size} ubatch-size={args.ubatch_size} pp={args.max_prompt_tokens} pp+tg={args.max_tokens}\n"
|
||||
f"branch={args.branch} commit={args.commit}")
|
||||
|
||||
# Prometheus
|
||||
end_time = time.time()
|
||||
prometheus_metrics = {}
|
||||
if is_server_listening("0.0.0.0", 9090):
|
||||
metrics = ['prompt_tokens_seconds', 'predicted_tokens_seconds',
|
||||
'kv_cache_usage_ratio', 'requests_processing', 'requests_deferred']
|
||||
|
||||
for metric in metrics:
|
||||
resp = requests.get(f"http://localhost:9090/api/v1/query_range",
|
||||
params={'query': 'llamacpp:' + metric, 'start': start_time, 'end': end_time, 'step': 2})
|
||||
|
||||
with open(f"{metric}.json", 'w') as metric_json:
|
||||
metric_json.write(resp.text)
|
||||
|
||||
if resp.status_code != 200:
|
||||
print(f"bench: unable to extract prometheus metric {metric}: {resp.text}")
|
||||
else:
|
||||
metric_data = resp.json()
|
||||
values = metric_data['data']['result'][0]['values']
|
||||
timestamps, metric_values = zip(*values)
|
||||
metric_values = [float(value) for value in metric_values]
|
||||
prometheus_metrics[metric] = metric_values
|
||||
timestamps_dt = [str(datetime.fromtimestamp(int(ts))) for ts in timestamps]
|
||||
plt.figure(figsize=(16, 10), dpi=80)
|
||||
plt.plot(timestamps_dt, metric_values, label=metric)
|
||||
plt.xticks(rotation=0, fontsize=14, horizontalalignment='center', alpha=.7)
|
||||
plt.yticks(fontsize=12, alpha=.7)
|
||||
|
||||
ylabel = f"llamacpp:{metric}"
|
||||
plt.title(title,
|
||||
fontsize=14, wrap=True)
|
||||
plt.grid(axis='both', alpha=.3)
|
||||
plt.ylabel(ylabel, fontsize=22)
|
||||
plt.xlabel(xlabel, fontsize=14, wrap=True)
|
||||
plt.gca().xaxis.set_major_locator(matplotlib.dates.MinuteLocator())
|
||||
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%Y-%m-%d %H:%M:%S"))
|
||||
plt.gcf().autofmt_xdate()
|
||||
|
||||
# Remove borders
|
||||
plt.gca().spines["top"].set_alpha(0.0)
|
||||
plt.gca().spines["bottom"].set_alpha(0.3)
|
||||
plt.gca().spines["right"].set_alpha(0.0)
|
||||
plt.gca().spines["left"].set_alpha(0.3)
|
||||
|
||||
# Save the plot as a jpg image
|
||||
plt.savefig(f'{metric}.jpg', dpi=60)
|
||||
plt.close()
|
||||
|
||||
# Mermaid format in case images upload failed
|
||||
with open(f"{metric}.mermaid", 'w') as mermaid_f:
|
||||
mermaid = (
|
||||
f"""---
|
||||
config:
|
||||
xyChart:
|
||||
titleFontSize: 12
|
||||
width: 900
|
||||
height: 600
|
||||
themeVariables:
|
||||
xyChart:
|
||||
titleColor: "#000000"
|
||||
---
|
||||
xychart-beta
|
||||
title "{title}"
|
||||
y-axis "llamacpp:{metric}"
|
||||
x-axis "llamacpp:{metric}" {int(min(timestamps))} --> {int(max(timestamps))}
|
||||
line [{', '.join([str(round(float(value), 2)) for value in metric_values])}]
|
||||
""")
|
||||
mermaid_f.write(mermaid)
|
||||
|
||||
# 140 chars max for commit status description
|
||||
bench_results = {
|
||||
"i": iterations,
|
||||
"req": {
|
||||
"p95": round(data['metrics']["http_req_duration"]["p(95)"], 2),
|
||||
"avg": round(data['metrics']["http_req_duration"]["avg"], 2),
|
||||
},
|
||||
"pp": {
|
||||
"p95": round(data['metrics']["llamacpp_prompt_processing_second"]["p(95)"], 2),
|
||||
"avg": round(data['metrics']["llamacpp_prompt_processing_second"]["avg"], 2),
|
||||
"0": round(mean(prometheus_metrics['prompt_tokens_seconds']), 2) if 'prompt_tokens_seconds' in prometheus_metrics else 0,
|
||||
},
|
||||
"tg": {
|
||||
"p95": round(data['metrics']["llamacpp_tokens_second"]["p(95)"], 2),
|
||||
"avg": round(data['metrics']["llamacpp_tokens_second"]["avg"], 2),
|
||||
"0": round(mean(prometheus_metrics['predicted_tokens_seconds']), 2) if 'predicted_tokens_seconds' in prometheus_metrics else 0,
|
||||
},
|
||||
}
|
||||
with open("results.github.env", 'a') as github_env:
|
||||
github_env.write(f"BENCH_RESULTS={json.dumps(bench_results, indent=None, separators=(',', ':') )}\n")
|
||||
github_env.write(f"BENCH_ITERATIONS={iterations}\n")
|
||||
|
||||
title = title.replace('\n', ' ')
|
||||
xlabel = xlabel.replace('\n', ' ')
|
||||
github_env.write(f"BENCH_GRAPH_TITLE={title}\n")
|
||||
github_env.write(f"BENCH_GRAPH_XLABEL={xlabel}\n")
|
||||
|
||||
|
||||
def start_benchmark(args):
|
||||
k6_path = './k6'
|
||||
if 'BENCH_K6_BIN_PATH' in os.environ:
|
||||
k6_path = os.environ['BENCH_K6_BIN_PATH']
|
||||
k6_args = [
|
||||
'run', args.scenario,
|
||||
'--no-color',
|
||||
'--no-connection-reuse',
|
||||
'--no-vu-connection-reuse',
|
||||
]
|
||||
k6_args.extend(['--duration', args.duration])
|
||||
k6_args.extend(['--iterations', args.n_prompts])
|
||||
k6_args.extend(['--vus', args.parallel])
|
||||
k6_args.extend(['--summary-export', 'k6-results.json'])
|
||||
k6_args.extend(['--out', 'csv=k6-results.csv'])
|
||||
args = f"SERVER_BENCH_N_PROMPTS={args.n_prompts} SERVER_BENCH_MAX_PROMPT_TOKENS={args.max_prompt_tokens} SERVER_BENCH_MAX_CONTEXT={args.max_tokens} "
|
||||
args = args + ' '.join([str(arg) for arg in [k6_path, *k6_args]])
|
||||
print(f"bench: starting k6 with: {args}")
|
||||
k6_completed = subprocess.run(args, shell=True, stdout=sys.stdout, stderr=sys.stderr)
|
||||
if k6_completed.returncode != 0:
|
||||
raise Exception("bench: unable to run k6")
|
||||
|
||||
|
||||
def start_server(args):
|
||||
server_process = start_server_background(args)
|
||||
|
||||
attempts = 0
|
||||
max_attempts = 600
|
||||
if 'GITHUB_ACTIONS' in os.environ:
|
||||
max_attempts *= 2
|
||||
|
||||
while not is_server_listening(args.host, args.port):
|
||||
attempts += 1
|
||||
if attempts > max_attempts:
|
||||
assert False, "server not started"
|
||||
print(f"bench: waiting for server to start ...")
|
||||
time.sleep(0.5)
|
||||
|
||||
attempts = 0
|
||||
while not is_server_ready(args.host, args.port):
|
||||
attempts += 1
|
||||
if attempts > max_attempts:
|
||||
assert False, "server not ready"
|
||||
print(f"bench: waiting for server to be ready ...")
|
||||
time.sleep(0.5)
|
||||
|
||||
print("bench: server started and ready.")
|
||||
return server_process
|
||||
|
||||
|
||||
def start_server_background(args):
|
||||
# Start the server
|
||||
server_path = '../../../build/bin/llama-server'
|
||||
if 'LLAMA_SERVER_BIN_PATH' in os.environ:
|
||||
server_path = os.environ['LLAMA_SERVER_BIN_PATH']
|
||||
server_args = [
|
||||
'--host', args.host,
|
||||
'--port', args.port,
|
||||
]
|
||||
server_args.extend(['--hf-repo', args.hf_repo])
|
||||
server_args.extend(['--hf-file', args.hf_file])
|
||||
server_args.extend(['--n-gpu-layers', args.n_gpu_layers])
|
||||
server_args.extend(['--ctx-size', args.ctx_size])
|
||||
server_args.extend(['--parallel', args.parallel])
|
||||
server_args.extend(['--batch-size', args.batch_size])
|
||||
server_args.extend(['--ubatch-size', args.ubatch_size])
|
||||
server_args.extend(['--n-predict', args.max_tokens * 2])
|
||||
server_args.append('--cont-batching')
|
||||
server_args.append('--metrics')
|
||||
server_args.append('--flash-attn')
|
||||
args = [str(arg) for arg in [server_path, *server_args]]
|
||||
print(f"bench: starting server with: {' '.join(args)}")
|
||||
pkwargs = {
|
||||
'stdout': subprocess.PIPE,
|
||||
'stderr': subprocess.PIPE
|
||||
}
|
||||
server_process = subprocess.Popen(
|
||||
args,
|
||||
**pkwargs) # pyright: ignore[reportArgumentType, reportCallIssue]
|
||||
|
||||
def server_log(in_stream, out_stream):
|
||||
for line in iter(in_stream.readline, b''):
|
||||
print(line.decode('utf-8'), end='', file=out_stream)
|
||||
|
||||
thread_stdout = threading.Thread(target=server_log, args=(server_process.stdout, sys.stdout))
|
||||
thread_stdout.start()
|
||||
thread_stderr = threading.Thread(target=server_log, args=(server_process.stderr, sys.stderr))
|
||||
thread_stderr.start()
|
||||
|
||||
return server_process
|
||||
|
||||
|
||||
def is_server_listening(server_fqdn, server_port):
|
||||
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
|
||||
result = sock.connect_ex((server_fqdn, server_port))
|
||||
_is_server_listening = result == 0
|
||||
if _is_server_listening:
|
||||
print(f"server is listening on {server_fqdn}:{server_port}...")
|
||||
return _is_server_listening
|
||||
|
||||
|
||||
def is_server_ready(server_fqdn, server_port):
|
||||
url = f"http://{server_fqdn}:{server_port}/health"
|
||||
response = requests.get(url)
|
||||
return response.status_code == 200
|
||||
|
||||
|
||||
def escape_metric_name(metric_name):
|
||||
return re.sub('[^A-Z0-9]', '_', metric_name.upper())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
9
tools/server/bench/prometheus.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
global:
|
||||
scrape_interval: 10s
|
||||
external_labels:
|
||||
llamacpp: 'server'
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'llama.cpp server'
|
||||
static_configs:
|
||||
- targets: ['localhost:8080']
|
||||
2
tools/server/bench/requirements.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
matplotlib
|
||||
requests
|
||||
162
tools/server/bench/script.js
Normal file
@@ -0,0 +1,162 @@
|
||||
import sse from 'k6/x/sse'
|
||||
import {check, sleep} from 'k6'
|
||||
import {SharedArray} from 'k6/data'
|
||||
import {Counter, Rate, Trend} from 'k6/metrics'
|
||||
import exec from 'k6/execution';
|
||||
|
||||
// Server chat completions prefix
|
||||
const server_url = __ENV.SERVER_BENCH_URL ? __ENV.SERVER_BENCH_URL : 'http://localhost:8080/v1'
|
||||
|
||||
// Number of total prompts in the dataset - default 10m / 10 seconds/request * number of users
|
||||
const n_prompt = __ENV.SERVER_BENCH_N_PROMPTS ? parseInt(__ENV.SERVER_BENCH_N_PROMPTS) : 600 / 10 * 8
|
||||
|
||||
// Model name to request
|
||||
const model = __ENV.SERVER_BENCH_MODEL_ALIAS ? __ENV.SERVER_BENCH_MODEL_ALIAS : 'my-model'
|
||||
|
||||
// Dataset path
|
||||
const dataset_path = __ENV.SERVER_BENCH_DATASET ? __ENV.SERVER_BENCH_DATASET : './ShareGPT_V3_unfiltered_cleaned_split.json'
|
||||
|
||||
// Max tokens to predict
|
||||
const max_tokens = __ENV.SERVER_BENCH_MAX_TOKENS ? parseInt(__ENV.SERVER_BENCH_MAX_TOKENS) : 512
|
||||
|
||||
// Max prompt tokens
|
||||
const n_prompt_tokens = __ENV.SERVER_BENCH_MAX_PROMPT_TOKENS ? parseInt(__ENV.SERVER_BENCH_MAX_PROMPT_TOKENS) : 1024
|
||||
|
||||
// Max slot context
|
||||
const n_ctx_slot = __ENV.SERVER_BENCH_MAX_CONTEXT ? parseInt(__ENV.SERVER_BENCH_MAX_CONTEXT) : 2048
|
||||
|
||||
export function setup() {
|
||||
console.info(`Benchmark config: server_url=${server_url} n_prompt=${n_prompt} model=${model} dataset_path=${dataset_path} max_tokens=${max_tokens}`)
|
||||
}
|
||||
|
||||
const data = new SharedArray('conversations', function () {
|
||||
const tokenizer = (message) => message.split(/[\s,'".?]/)
|
||||
|
||||
return JSON.parse(open(dataset_path))
|
||||
// Filter out the conversations with less than 2 turns.
|
||||
.filter(data => data["conversations"].length >= 2)
|
||||
.filter(data => data["conversations"][0]["from"] === "human")
|
||||
.map(data => {
|
||||
return {
|
||||
prompt: data["conversations"][0]["value"],
|
||||
n_prompt_tokens: tokenizer(data["conversations"][0]["value"]).length,
|
||||
n_completion_tokens: tokenizer(data["conversations"][1]["value"]).length,
|
||||
}
|
||||
})
|
||||
// Filter out too short sequences
|
||||
.filter(conv => conv.n_prompt_tokens >= 4 && conv.n_completion_tokens >= 4)
|
||||
// Filter out too long sequences.
|
||||
.filter(conv => conv.n_prompt_tokens <= n_prompt_tokens && conv.n_prompt_tokens + conv.n_completion_tokens <= n_ctx_slot)
|
||||
// Keep only first n prompts
|
||||
.slice(0, n_prompt)
|
||||
})
|
||||
|
||||
const llamacpp_prompt_tokens = new Trend('llamacpp_prompt_tokens')
|
||||
const llamacpp_completion_tokens = new Trend('llamacpp_completion_tokens')
|
||||
|
||||
const llamacpp_tokens_second = new Trend('llamacpp_tokens_second')
|
||||
const llamacpp_prompt_processing_second = new Trend('llamacpp_prompt_processing_second')
|
||||
const llamacpp_emit_first_token_second = new Trend('llamacpp_emit_first_token_second')
|
||||
|
||||
const llamacpp_prompt_tokens_total_counter = new Counter('llamacpp_prompt_tokens_total_counter')
|
||||
const llamacpp_completion_tokens_total_counter = new Counter('llamacpp_completion_tokens_total_counter')
|
||||
|
||||
const llamacpp_completions_truncated_rate = new Rate('llamacpp_completions_truncated_rate')
|
||||
const llamacpp_completions_stop_rate = new Rate('llamacpp_completions_stop_rate')
|
||||
|
||||
export const options = {
|
||||
thresholds: {
|
||||
llamacpp_completions_truncated_rate: [
|
||||
// more than 80% of truncated input will abort the test
|
||||
{threshold: 'rate < 0.8', abortOnFail: true, delayAbortEval: '1m'},
|
||||
],
|
||||
},
|
||||
duration: '10m',
|
||||
vus: 8,
|
||||
}
|
||||
|
||||
export default function () {
|
||||
const conversation = data[exec.scenario.iterationInInstance % data.length]
|
||||
const payload = {
|
||||
"messages": [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are ChatGPT, an AI assistant.",
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": conversation.prompt,
|
||||
}
|
||||
],
|
||||
"model": model,
|
||||
"stream": true,
|
||||
"stream_options": {
|
||||
"include_usage": true, // False to be supported in llama.cpp server
|
||||
},
|
||||
"seed": 42,
|
||||
"max_tokens": max_tokens,
|
||||
"stop": ["<|im_end|>"] // This is temporary for phi-2 base (i.e. not instructed) since the server expects that the model always to emit BOS
|
||||
}
|
||||
|
||||
const params = {method: 'POST', body: JSON.stringify(payload)};
|
||||
|
||||
const startTime = new Date()
|
||||
let promptEvalEndTime = null
|
||||
let prompt_tokens = 0
|
||||
let completions_tokens = 0
|
||||
let finish_reason = null
|
||||
const res = sse.open(`${server_url}/chat/completions`, params, function (client) {
|
||||
client.on('event', function (event) {
|
||||
if (promptEvalEndTime == null) {
|
||||
promptEvalEndTime = new Date()
|
||||
llamacpp_emit_first_token_second.add((promptEvalEndTime - startTime) / 1.e3)
|
||||
}
|
||||
|
||||
if (event.data === '[DONE]' || event.data === '') {
|
||||
return
|
||||
}
|
||||
|
||||
let chunk = JSON.parse(event.data)
|
||||
|
||||
if (chunk.choices && chunk.choices.length > 0) {
|
||||
let choice = chunk.choices[0]
|
||||
if (choice.finish_reason) {
|
||||
finish_reason = choice.finish_reason
|
||||
}
|
||||
}
|
||||
|
||||
if (chunk.usage) {
|
||||
prompt_tokens = chunk.usage.prompt_tokens
|
||||
llamacpp_prompt_tokens.add(prompt_tokens)
|
||||
llamacpp_prompt_tokens_total_counter.add(prompt_tokens)
|
||||
|
||||
completions_tokens = chunk.usage.completion_tokens
|
||||
llamacpp_completion_tokens.add(completions_tokens)
|
||||
llamacpp_completion_tokens_total_counter.add(completions_tokens)
|
||||
}
|
||||
})
|
||||
|
||||
client.on('error', function (e) {
|
||||
console.log('An unexpected error occurred: ', e.error());
|
||||
throw e;
|
||||
})
|
||||
})
|
||||
|
||||
check(res, {'success completion': (r) => r.status === 200})
|
||||
|
||||
const endTime = new Date()
|
||||
|
||||
const promptEvalTime = promptEvalEndTime - startTime
|
||||
if (promptEvalTime > 0) {
|
||||
llamacpp_prompt_processing_second.add(prompt_tokens / (promptEvalEndTime - startTime) * 1.e3)
|
||||
}
|
||||
|
||||
const completion_time = endTime - promptEvalEndTime
|
||||
if (completions_tokens > 0 && completion_time > 0) {
|
||||
llamacpp_tokens_second.add(completions_tokens / completion_time * 1.e3)
|
||||
}
|
||||
llamacpp_completions_truncated_rate.add(finish_reason === 'length')
|
||||
llamacpp_completions_stop_rate.add(finish_reason === 'stop')
|
||||
|
||||
sleep(0.3)
|
||||
}
|
||||
109
tools/server/chat-llama2.sh
Executable file
@@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
API_URL="${API_URL:-http://127.0.0.1:8080}"
|
||||
|
||||
CHAT=(
|
||||
"Hello, Assistant."
|
||||
"Hello. How may I help you today?"
|
||||
)
|
||||
|
||||
INSTRUCTION="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
|
||||
|
||||
trim() {
|
||||
shopt -s extglob
|
||||
set -- "${1##+([[:space:]])}"
|
||||
printf "%s" "${1%%+([[:space:]])}"
|
||||
}
|
||||
|
||||
trim_trailing() {
|
||||
shopt -s extglob
|
||||
printf "%s" "${1%%+([[:space:]])}"
|
||||
}
|
||||
|
||||
format_prompt() {
|
||||
if [[ "${#CHAT[@]}" -eq 0 ]]; then
|
||||
echo -n "[INST] <<SYS>>\n${INSTRUCTION}\n<</SYS>>"
|
||||
else
|
||||
LAST_INDEX=$(( ${#CHAT[@]} - 1 ))
|
||||
echo -n "${CHAT[$LAST_INDEX]}\n[INST] $1 [/INST]"
|
||||
fi
|
||||
}
|
||||
|
||||
tokenize() {
|
||||
curl \
|
||||
--silent \
|
||||
--request POST \
|
||||
--url "${API_URL}/tokenize" \
|
||||
--header "Content-Type: application/json" \
|
||||
--data-raw "$(jq -ns --arg content "$1" '{content:$content}')" \
|
||||
| jq '.tokens[]'
|
||||
}
|
||||
|
||||
N_KEEP=$(tokenize "[INST] <<SYS>>\n${INSTRUCTION}\n<</SYS>>" | wc -l)
|
||||
|
||||
chat_completion() {
|
||||
PROMPT="$(trim_trailing "$(format_prompt "$1")")"
|
||||
DATA="$(echo -n "$PROMPT" | jq -Rs --argjson n_keep $N_KEEP '{
|
||||
prompt: .,
|
||||
temperature: 0.2,
|
||||
top_k: 40,
|
||||
top_p: 0.9,
|
||||
n_keep: $n_keep,
|
||||
n_predict: 1024,
|
||||
stop: ["[INST]"],
|
||||
stream: true
|
||||
}')"
|
||||
|
||||
# Create a temporary file to hold the Python output
|
||||
TEMPFILE=$(mktemp)
|
||||
|
||||
exec 3< <(curl \
|
||||
--silent \
|
||||
--no-buffer \
|
||||
--request POST \
|
||||
--url "${API_URL}/completion" \
|
||||
--header "Content-Type: application/json" \
|
||||
--data-raw "${DATA}")
|
||||
|
||||
python -c "
|
||||
import json
|
||||
import sys
|
||||
|
||||
answer = ''
|
||||
while True:
|
||||
line = sys.stdin.readline()
|
||||
if not line:
|
||||
break
|
||||
if line.startswith('data: '):
|
||||
json_content = line[6:].strip()
|
||||
content = json.loads(json_content)['content']
|
||||
sys.stdout.write(content)
|
||||
sys.stdout.flush()
|
||||
answer += content
|
||||
|
||||
answer = answer.rstrip('\n')
|
||||
|
||||
# Write the answer to the temporary file
|
||||
with open('$TEMPFILE', 'w') as f:
|
||||
f.write(answer)
|
||||
" <&3
|
||||
|
||||
exec 3<&-
|
||||
|
||||
# Read the answer from the temporary file
|
||||
ANSWER=$(cat $TEMPFILE)
|
||||
|
||||
# Clean up the temporary file
|
||||
rm $TEMPFILE
|
||||
|
||||
printf "\n"
|
||||
|
||||
CHAT+=("$1" "$(trim "$ANSWER")")
|
||||
}
|
||||
|
||||
while true; do
|
||||
echo -en "\033[0;32m" # Green color
|
||||
read -r -e -p "> " QUESTION
|
||||
echo -en "\033[0m" # Reset color
|
||||
chat_completion "${QUESTION}"
|
||||
done
|
||||
131
tools/server/chat.mjs
Normal file
@@ -0,0 +1,131 @@
|
||||
import * as readline from 'node:readline'
|
||||
import { stdin, stdout } from 'node:process'
|
||||
import { readFileSync } from 'node:fs'
|
||||
import { SchemaConverter } from './public_legacy/json-schema-to-grammar.mjs'
|
||||
|
||||
const args = process.argv.slice(2);
|
||||
const grammarJsonSchemaFile = args.find(
|
||||
(_, index) => args[index - 1] === "--grammar-json-schema"
|
||||
);
|
||||
|
||||
const no_cached_prompt = args.find(
|
||||
(_, index) => args[index - 1] === "--no-cache-prompt"
|
||||
) ?? "false";
|
||||
|
||||
const grammarFile = args.find((_, index) => args[index - 1] === "--grammar");
|
||||
|
||||
// Example usage: function,arguments
|
||||
const grammarJsonSchemaPropOrder = args.find(
|
||||
(_, index) => args[index - 1] === "--grammar-json-schema-prop-order"
|
||||
);
|
||||
const propOrder = grammarJsonSchemaPropOrder
|
||||
? grammarJsonSchemaPropOrder
|
||||
.split(",")
|
||||
.reduce((acc, cur, index) => ({ ...acc, [cur]: index }), {})
|
||||
: {};
|
||||
|
||||
let grammar = null
|
||||
if (grammarJsonSchemaFile) {
|
||||
let schema = JSON.parse(readFileSync(grammarJsonSchemaFile, 'utf-8'))
|
||||
const converter = new SchemaConverter({prop_order: propOrder, allow_fetch: true})
|
||||
schema = await converter.resolveRefs(schema, grammarJsonSchemaFile)
|
||||
converter.visit(schema, '')
|
||||
grammar = converter.formatGrammar()
|
||||
}
|
||||
if (grammarFile) {
|
||||
grammar = readFileSync(grammarFile, 'utf-8')
|
||||
}
|
||||
|
||||
// for cached prompt
|
||||
let slot_id = -1;
|
||||
|
||||
const API_URL = 'http://127.0.0.1:8080'
|
||||
|
||||
const chat = [
|
||||
{
|
||||
human: "Hello, Assistant.",
|
||||
assistant: "Hello. How may I help you today?"
|
||||
},
|
||||
{
|
||||
human: "Please tell me the largest city in Europe.",
|
||||
assistant: "Sure. The largest city in Europe is Moscow, the capital of Russia."
|
||||
},
|
||||
]
|
||||
|
||||
const instruction = `A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.`
|
||||
|
||||
function format_prompt(question) {
|
||||
return `${instruction}\n${
|
||||
chat.map(m =>`### Human: ${m.human}\n### Assistant: ${m.assistant}`).join("\n")
|
||||
}\n### Human: ${question}\n### Assistant:`
|
||||
}
|
||||
|
||||
async function tokenize(content) {
|
||||
const result = await fetch(`${API_URL}/tokenize`, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ content })
|
||||
})
|
||||
|
||||
if (!result.ok) {
|
||||
return []
|
||||
}
|
||||
|
||||
return await result.json().tokens
|
||||
}
|
||||
|
||||
const n_keep = await tokenize(instruction).length
|
||||
|
||||
async function chat_completion(question) {
|
||||
const result = await fetch(`${API_URL}/completion`, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
prompt: format_prompt(question),
|
||||
temperature: 0.2,
|
||||
top_k: 40,
|
||||
top_p: 0.9,
|
||||
n_keep: n_keep,
|
||||
n_predict: 256,
|
||||
cache_prompt: no_cached_prompt === "false",
|
||||
slot_id: slot_id,
|
||||
stop: ["\n### Human:"], // stop completion after generating this
|
||||
grammar,
|
||||
stream: true,
|
||||
})
|
||||
})
|
||||
|
||||
if (!result.ok) {
|
||||
return
|
||||
}
|
||||
|
||||
let answer = ''
|
||||
|
||||
for await (var chunk of result.body) {
|
||||
const t = Buffer.from(chunk).toString('utf8')
|
||||
if (t.startsWith('data: ')) {
|
||||
const message = JSON.parse(t.substring(6))
|
||||
slot_id = message.slot_id
|
||||
answer += message.content
|
||||
process.stdout.write(message.content)
|
||||
if (message.stop) {
|
||||
if (message.truncated) {
|
||||
chat.shift()
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
process.stdout.write('\n')
|
||||
chat.push({ human: question, assistant: answer.trimStart() })
|
||||
}
|
||||
|
||||
const rl = readline.createInterface({ input: stdin, output: stdout });
|
||||
|
||||
const readlineQuestion = (rl, query, options) => new Promise((resolve, reject) => {
|
||||
rl.question(query, options, resolve)
|
||||
});
|
||||
|
||||
while(true) {
|
||||
const question = await readlineQuestion(rl, '> ')
|
||||
await chat_completion(question)
|
||||
}
|
||||
80
tools/server/chat.sh
Executable file
@@ -0,0 +1,80 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
API_URL="${API_URL:-http://127.0.0.1:8080}"
|
||||
|
||||
CHAT=(
|
||||
"Hello, Assistant."
|
||||
"Hello. How may I help you today?"
|
||||
"Please tell me the largest city in Europe."
|
||||
"Sure. The largest city in Europe is Moscow, the capital of Russia."
|
||||
)
|
||||
|
||||
INSTRUCTION="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
|
||||
|
||||
trim() {
|
||||
shopt -s extglob
|
||||
set -- "${1##+([[:space:]])}"
|
||||
printf "%s" "${1%%+([[:space:]])}"
|
||||
}
|
||||
|
||||
trim_trailing() {
|
||||
shopt -s extglob
|
||||
printf "%s" "${1%%+([[:space:]])}"
|
||||
}
|
||||
|
||||
format_prompt() {
|
||||
echo -n "${INSTRUCTION}"
|
||||
printf "\n### Human: %s\n### Assistant: %s" "${CHAT[@]}" "$1"
|
||||
}
|
||||
|
||||
tokenize() {
|
||||
curl \
|
||||
--silent \
|
||||
--request POST \
|
||||
--url "${API_URL}/tokenize" \
|
||||
--header "Content-Type: application/json" \
|
||||
--data-raw "$(jq -ns --arg content "$1" '{content:$content}')" \
|
||||
| jq '.tokens[]'
|
||||
}
|
||||
|
||||
N_KEEP=$(tokenize "${INSTRUCTION}" | wc -l)
|
||||
|
||||
chat_completion() {
|
||||
PROMPT="$(trim_trailing "$(format_prompt "$1")")"
|
||||
DATA="$(echo -n "$PROMPT" | jq -Rs --argjson n_keep $N_KEEP '{
|
||||
prompt: .,
|
||||
temperature: 0.2,
|
||||
top_k: 40,
|
||||
top_p: 0.9,
|
||||
n_keep: $n_keep,
|
||||
n_predict: 256,
|
||||
cache_prompt: true,
|
||||
stop: ["\n### Human:"],
|
||||
stream: true
|
||||
}')"
|
||||
|
||||
ANSWER=''
|
||||
|
||||
while IFS= read -r LINE; do
|
||||
if [[ $LINE = data:* ]]; then
|
||||
CONTENT="$(echo "${LINE:5}" | jq -r '.content')"
|
||||
printf "%s" "${CONTENT}"
|
||||
ANSWER+="${CONTENT}"
|
||||
fi
|
||||
done < <(curl \
|
||||
--silent \
|
||||
--no-buffer \
|
||||
--request POST \
|
||||
--url "${API_URL}/completion" \
|
||||
--header "Content-Type: application/json" \
|
||||
--data-raw "${DATA}")
|
||||
|
||||
printf "\n"
|
||||
|
||||
CHAT+=("$1" "$(trim "$ANSWER")")
|
||||
}
|
||||
|
||||
while true; do
|
||||
read -r -e -p "> " QUESTION
|
||||
chat_completion "${QUESTION}"
|
||||
done
|
||||
BIN
tools/server/public/index.html.gz
Normal file
12
tools/server/public/loading.html
Normal file
@@ -0,0 +1,12 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="refresh" content="5">
|
||||
</head>
|
||||
<body>
|
||||
<div id="loading">
|
||||
The model is loading. Please wait.<br/>
|
||||
The user interface will appear soon.
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
402
tools/server/public_legacy/colorthemes.css
Executable file
@@ -0,0 +1,402 @@
|
||||
@import url("theme-snowstorm.css");
|
||||
@import url("theme-polarnight.css");
|
||||
@import url("theme-ketivah.css");
|
||||
@import url("theme-mangotango.css");
|
||||
@import url("theme-playground.css");
|
||||
@import url("theme-beeninorder.css");
|
||||
|
||||
:root {
|
||||
/* ---------- PRIMARY COLORS ----------------- */
|
||||
--primary-color-1: hsl(217.5, 26.7%, 94.1%);
|
||||
--primary-color-1-hue: 217.5;
|
||||
--primary-color-1-saturation: 26.7%;
|
||||
--primary-color-1-lightness: 94.1%;
|
||||
|
||||
--primary-color-2: hsl(218.2, 26.8%, 92.0%);
|
||||
--primary-color-2-hue: 218.2;
|
||||
--primary-color-2-saturation: 26.8%;
|
||||
--primary-color-2-lightness: 92.0%;
|
||||
|
||||
--primary-color-3: hsl(218.8, 27.9%, 88.0%);
|
||||
--primary-color-3-hue: 218.8;
|
||||
--primary-color-3-saturation: 27.9%;
|
||||
--primary-color-3-lightness: 88.0%;
|
||||
|
||||
--primary-color-4: hsl(218.8, 18.3%, 81.8%);
|
||||
--primary-color-4-hue: 218.8;
|
||||
--primary-color-4-saturation: 18.3%;
|
||||
--primary-color-4-lightness: 81.8%;
|
||||
|
||||
|
||||
/* ---------- SECONDARY COLORS --------------- */
|
||||
--secondary-color-1: hsl(220.0, 16.4%, 21.6%);
|
||||
--secondary-color-1-hue: 220.0;
|
||||
--secondary-color-1-saturation: 16.4%;
|
||||
--secondary-color-1-lightness: 21.6%;
|
||||
|
||||
--secondary-color-2: hsl(221.7, 16.3%, 27.6%);
|
||||
--secondary-color-2-hue: 221.7;
|
||||
--secondary-color-2-saturation: 16.3%;
|
||||
--secondary-color-2-lightness: 27.6%;
|
||||
|
||||
--secondary-color-3: hsl(220.0, 16.8%, 31.6%);
|
||||
--secondary-color-3-hue: 220.0;
|
||||
--secondary-color-3-saturation: 16.8%;
|
||||
--secondary-color-3-lightness: 31.6%;
|
||||
|
||||
--secondary-color-4: hsl(220.0, 16.5%, 35.7%);
|
||||
--secondary-color-4-hue: 220.0;
|
||||
--secondary-color-4-saturation: 16.5%;
|
||||
--secondary-color-4-lightness: 35.7%;
|
||||
|
||||
|
||||
|
||||
/* ----------- NUANCES COLORS ---------------- */
|
||||
--theme-nuance-color-1: hsl(178.7, 25.1%, 64.9%);
|
||||
--theme-nuance-color-1-hue: 178.7;
|
||||
--theme-nuance-color-1-saturation: 25.1%;
|
||||
--theme-nuance-color-1-lightness: 64.9%;
|
||||
|
||||
--theme-nuance-color-2: hsl(193.3, 43.4%, 67.5%);
|
||||
--theme-nuance-color-2-hue: 193.3;
|
||||
--theme-nuance-color-2-saturation: 43.4%;
|
||||
--theme-nuance-color-2-lightness: 67.5%;
|
||||
|
||||
--theme-nuance-color-3: hsl(210.0, 34.0%, 63.1%);
|
||||
--theme-nuance-color-3-hue: 210.0;
|
||||
--theme-nuance-color-3-saturation: 34.0%;
|
||||
--theme-nuance-color-3-lightness: 63.1%;
|
||||
|
||||
--theme-nuance-color-4: hsl(213.1, 32.0%, 52.2%);
|
||||
--theme-nuance-color-4-hue: 213.1;
|
||||
--theme-nuance-color-4-saturation: 32.0%;
|
||||
--theme-nuance-color-4-lightness: 52.2%;
|
||||
|
||||
|
||||
|
||||
/* ----------- ROYGP COLORS ------------------ */
|
||||
--theme-red-color: hsl(32.5, 80%, 50%);
|
||||
--theme-orange-color: hsl(32.5, 70%, 45%);
|
||||
--theme-yellow-color: hsl(40.0, 0.6%, 73.3%);
|
||||
--theme-green-color: hsl(92.4, 27.8%, 64.7%);
|
||||
--theme-purple-color: hsl(311.1, 20.2%, 63.1%);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--background-color-1: var(--primary-color-1);
|
||||
--background-color-2: var(--primary-color-2);
|
||||
--background-color-3: var(--primary-color-3);
|
||||
--background-color-4: var(--primary-color-4);
|
||||
|
||||
--border-color-1: var(--primary-color-2);
|
||||
--border-color-2: var(--primary-color-3);
|
||||
--border-color-3: var(--primary-color-4);
|
||||
|
||||
--border-focus-color: var(--theme-nuance-color-2);
|
||||
--border-focus-shadow: var(--theme-nuance-color-1);
|
||||
|
||||
--text-color-plain: var(--secondary-color-1);
|
||||
--text-color-subtile-1: var(--secondary-color-2);
|
||||
--text-color-subtile-2: var(--secondary-color-3);
|
||||
|
||||
--code-background-color: var(--secondary-color-2);
|
||||
--code-text-color: var(--primary-color-2);
|
||||
|
||||
--ui-range-thumb-color: var(--theme-nuance-color-3);
|
||||
--ui-range-thumb-border: var(--ui-ranger-thumb-color);
|
||||
|
||||
--textarea-border-color: var(--secondary-color-4);
|
||||
|
||||
--chat-id-color: var(--theme-nuance-color-4);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--button-alert-text-hover: var(--primary-color-1);
|
||||
--button-alert-color-hover: var(--theme-orange-color);
|
||||
--button-alert-border-hover: var(--theme-orange-color);
|
||||
|
||||
--button-alert-text-active: var(--primary-color-1);
|
||||
--button-alert-color-active: var(--theme-red-color);
|
||||
--button-alert-border-active: var(--theme-red-color);
|
||||
|
||||
|
||||
|
||||
/* ----------- PRIMARY BUTTONS --------------- */
|
||||
/* - button should immediately catch the eye - */
|
||||
--button-primary-text: var(--secondary-color-1);
|
||||
--button-primary-color: var(--theme-nuance-color-3);
|
||||
--button-primary-border: var(--theme-nuance-color-3);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-primary-text-hover:
|
||||
hsl(217.5,
|
||||
calc(var(--secondary-color-1-saturation) + 35%),
|
||||
calc(var(--secondary-color-1-lightness) - 30%));
|
||||
|
||||
--button-primary-color-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
--button-primary-border-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-primary-text-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 35%));
|
||||
|
||||
--button-primary-color-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 10%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 25%));
|
||||
|
||||
--button-primary-border-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 10%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 25%));
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY BUTTONS -------------- */
|
||||
/* these should NOT immediately catch the eye */
|
||||
--button-secondary-text:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 50%));
|
||||
|
||||
--button-secondary-color:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
--button-secondary-border:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-secondary-text-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 80%));
|
||||
|
||||
--button-secondary-color-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 22%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 1%));
|
||||
|
||||
--button-secondary-border-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 22%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 1%));
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-secondary-text-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) + 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 55%));
|
||||
|
||||
--button-secondary-color-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 30%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
--button-secondary-border-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 30%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
|
||||
|
||||
/* ---------- TERTIARY BUTTONS --------------- */
|
||||
/* ---------- disabled buttons --------------- */
|
||||
--button-tertiary-text:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
--button-tertiary-color:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
--button-tertiary-border:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-tertiary-text-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
--button-tertiary-color-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
--button-tertiary-border-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
.theme-template {
|
||||
|
||||
|
||||
If light theme: should go from bright to darker
|
||||
If dark theme: should go from dark to brighter
|
||||
ideally this should not be anything but steps of
|
||||
gray or slightly variants from it
|
||||
|
||||
--primary-color-1: #2E3440;
|
||||
--primary-color-2: #3B4252;
|
||||
--primary-color-3: #434C5E;
|
||||
--primary-color-4: #4C566A;
|
||||
|
||||
|
||||
|
||||
If light theme: should go from dark to brighter
|
||||
If dark theme: should go from bright to darker
|
||||
ideally this should not be anything but steps of
|
||||
gray or slightly variants from it
|
||||
|
||||
--secondary-color-1: #ECEFF4;
|
||||
--secondary-color-2: #E5E9F0;
|
||||
--secondary-color-3: #D8DEE9;
|
||||
--secondary-color-4: #C8CED9;
|
||||
|
||||
|
||||
|
||||
Choose wisely nuance colors. It is not easy to find
|
||||
4 harmonizing nuance colors. But keep in mind, that
|
||||
only one accent color could work too.
|
||||
|
||||
--theme-nuance-color-1: #8FBCBB;
|
||||
--theme-nuance-color-2: #88C0D0;
|
||||
--theme-nuance-color-3: #81A1C1;
|
||||
--theme-nuance-color-4: #5E81AC;
|
||||
|
||||
|
||||
|
||||
adapt the color red, orange, yellow, green,
|
||||
purple to the 'mood' of your overall design
|
||||
e.g is it low-contrast? vibrant? dynamic? etc
|
||||
|
||||
--theme-red-color: #BF616A;
|
||||
--theme-orange-color: #D08770;
|
||||
--theme-yellow-color: #EBCB8B;
|
||||
--theme-green-color: #A3BE8C;
|
||||
--theme-purple-color: #B48EAD;
|
||||
|
||||
|
||||
|
||||
NOTE: comment all those line `--- ...` out
|
||||
------------------------------------------------
|
||||
--background-color-1:
|
||||
--background-color-2:
|
||||
--background-color-3:
|
||||
--background-color-4:
|
||||
|
||||
--border-color-1:
|
||||
--border-color-2:
|
||||
--border-color-3:
|
||||
|
||||
--border-focus-color:
|
||||
--border-focus-shadow:
|
||||
|
||||
--text-color-plain:
|
||||
--text-color-subtile-1:
|
||||
--text-color-subtile-2:
|
||||
|
||||
--code-background-color:
|
||||
--code-text-color:
|
||||
|
||||
--ui-range-thumb-color:
|
||||
--ui-range-thumb-border:
|
||||
|
||||
--textarea-border-color:
|
||||
|
||||
|
||||
|
||||
-------------------------------------------
|
||||
--button-alert-text-hover:
|
||||
--button-alert-color-hover:
|
||||
--button-alert-border-hover:
|
||||
|
||||
--button-alert-text-active:
|
||||
--button-alert-color-active:
|
||||
--button-alert-border-active:
|
||||
|
||||
|
||||
|
||||
----------- PRIMARY -----------------------
|
||||
--button should immediately catch the eye--
|
||||
|
||||
--button-primary-text:
|
||||
--button-primary-color:
|
||||
--button-primary-border:
|
||||
|
||||
|
||||
---------hover----------
|
||||
--button-primary-text-hover:
|
||||
--button-primary-color-hover:
|
||||
--button-primary-border-hover:
|
||||
|
||||
|
||||
---------active---------
|
||||
--button-primary-text-active:
|
||||
--button-primary-color-active:
|
||||
--button-primary-border-active:
|
||||
|
||||
|
||||
|
||||
------------ SECONDARY ------------------------
|
||||
--button should NOT immediately catch the eye--
|
||||
|
||||
--button-secondary-text:
|
||||
--button-secondary-color:
|
||||
--button-secondary-border:
|
||||
|
||||
|
||||
---------hover----------
|
||||
--button-secondary-text-hover:
|
||||
--button-secondary-color-hover:
|
||||
--button-secondary-border-hover:
|
||||
|
||||
|
||||
---------active---------
|
||||
--button-secondary-text-active:
|
||||
--button-secondary-color-active:
|
||||
--button-secondary-border-active:
|
||||
|
||||
|
||||
|
||||
---------- TERTIARY -----------------------
|
||||
---------- disabled buttons ---------------
|
||||
--button-tertiary-text:
|
||||
--button-tertiary-color:
|
||||
--button-tertiary-border:
|
||||
|
||||
|
||||
---------hover----------
|
||||
--button-tertiary-text:
|
||||
--button-tertiary-color:
|
||||
--button-tertiary-border:
|
||||
|
||||
}
|
||||
|
||||
*/
|
||||
209
tools/server/public_legacy/completion.js
Normal file
@@ -0,0 +1,209 @@
|
||||
const paramDefaults = {
|
||||
stream: true,
|
||||
n_predict: 500,
|
||||
temperature: 0.2,
|
||||
stop: ["</s>"]
|
||||
};
|
||||
|
||||
let generation_settings = null;
|
||||
|
||||
|
||||
// Completes the prompt as a generator. Recommended for most use cases.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// import { llama } from '/completion.js'
|
||||
//
|
||||
// const request = llama("Tell me a joke", {n_predict: 800})
|
||||
// for await (const chunk of request) {
|
||||
// document.write(chunk.data.content)
|
||||
// }
|
||||
//
|
||||
export async function* llama(prompt, params = {}, config = {}) {
|
||||
let controller = config.controller;
|
||||
const api_url = config.api_url?.replace(/\/+$/, '') || "";
|
||||
|
||||
if (!controller) {
|
||||
controller = new AbortController();
|
||||
}
|
||||
|
||||
const completionParams = { ...paramDefaults, ...params, prompt };
|
||||
|
||||
const response = await fetch(`${api_url}${config.endpoint || '/completion'}`, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(completionParams),
|
||||
headers: {
|
||||
'Connection': 'keep-alive',
|
||||
'Content-Type': 'application/json',
|
||||
'Accept': 'text/event-stream',
|
||||
...(params.api_key ? {'Authorization': `Bearer ${params.api_key}`} : {})
|
||||
},
|
||||
signal: controller.signal,
|
||||
});
|
||||
|
||||
const reader = response.body.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
|
||||
let content = "";
|
||||
let leftover = ""; // Buffer for partially read lines
|
||||
|
||||
try {
|
||||
let cont = true;
|
||||
|
||||
while (cont) {
|
||||
const result = await reader.read();
|
||||
if (result.done) {
|
||||
break;
|
||||
}
|
||||
|
||||
// Add any leftover data to the current chunk of data
|
||||
const text = leftover + decoder.decode(result.value);
|
||||
|
||||
// Check if the last character is a line break
|
||||
const endsWithLineBreak = text.endsWith('\n');
|
||||
|
||||
// Split the text into lines
|
||||
let lines = text.split('\n');
|
||||
|
||||
// If the text doesn't end with a line break, then the last line is incomplete
|
||||
// Store it in leftover to be added to the next chunk of data
|
||||
if (!endsWithLineBreak) {
|
||||
leftover = lines.pop();
|
||||
} else {
|
||||
leftover = ""; // Reset leftover if we have a line break at the end
|
||||
}
|
||||
|
||||
// Parse all sse events and add them to result
|
||||
const regex = /^(\S+):\s(.*)$/gm;
|
||||
for (const line of lines) {
|
||||
const match = regex.exec(line);
|
||||
if (match) {
|
||||
result[match[1]] = match[2];
|
||||
if (result.data === '[DONE]') {
|
||||
cont = false;
|
||||
break;
|
||||
}
|
||||
|
||||
// since we know this is llama.cpp, let's just decode the json in data
|
||||
if (result.data) {
|
||||
result.data = JSON.parse(result.data);
|
||||
content += result.data.content;
|
||||
|
||||
// yield
|
||||
yield result;
|
||||
|
||||
// if we got a stop token from server, we will break here
|
||||
if (result.data.stop) {
|
||||
if (result.data.generation_settings) {
|
||||
generation_settings = result.data.generation_settings;
|
||||
}
|
||||
cont = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (result.error) {
|
||||
try {
|
||||
result.error = JSON.parse(result.error);
|
||||
if (result.error.message.includes('slot unavailable')) {
|
||||
// Throw an error to be caught by upstream callers
|
||||
throw new Error('slot unavailable');
|
||||
} else {
|
||||
console.error(`llama.cpp error [${result.error.code} - ${result.error.type}]: ${result.error.message}`);
|
||||
}
|
||||
} catch(e) {
|
||||
console.error(`llama.cpp error ${result.error}`)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
if (e.name !== 'AbortError') {
|
||||
console.error("llama error: ", e);
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
finally {
|
||||
controller.abort();
|
||||
}
|
||||
|
||||
return content;
|
||||
}
|
||||
|
||||
// Call llama, return an event target that you can subscribe to
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// import { llamaEventTarget } from '/completion.js'
|
||||
//
|
||||
// const conn = llamaEventTarget(prompt)
|
||||
// conn.addEventListener("message", (chunk) => {
|
||||
// document.write(chunk.detail.content)
|
||||
// })
|
||||
//
|
||||
export const llamaEventTarget = (prompt, params = {}, config = {}) => {
|
||||
const eventTarget = new EventTarget();
|
||||
(async () => {
|
||||
let content = "";
|
||||
for await (const chunk of llama(prompt, params, config)) {
|
||||
if (chunk.data) {
|
||||
content += chunk.data.content;
|
||||
eventTarget.dispatchEvent(new CustomEvent("message", { detail: chunk.data }));
|
||||
}
|
||||
if (chunk.data.generation_settings) {
|
||||
eventTarget.dispatchEvent(new CustomEvent("generation_settings", { detail: chunk.data.generation_settings }));
|
||||
}
|
||||
if (chunk.data.timings) {
|
||||
eventTarget.dispatchEvent(new CustomEvent("timings", { detail: chunk.data.timings }));
|
||||
}
|
||||
}
|
||||
eventTarget.dispatchEvent(new CustomEvent("done", { detail: { content } }));
|
||||
})();
|
||||
return eventTarget;
|
||||
}
|
||||
|
||||
// Call llama, return a promise that resolves to the completed text. This does not support streaming
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// llamaPromise(prompt).then((content) => {
|
||||
// document.write(content)
|
||||
// })
|
||||
//
|
||||
// or
|
||||
//
|
||||
// const content = await llamaPromise(prompt)
|
||||
// document.write(content)
|
||||
//
|
||||
export const llamaPromise = (prompt, params = {}, config = {}) => {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
let content = "";
|
||||
try {
|
||||
for await (const chunk of llama(prompt, params, config)) {
|
||||
content += chunk.data.content;
|
||||
}
|
||||
resolve(content);
|
||||
} catch (error) {
|
||||
reject(error);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
/**
|
||||
* (deprecated)
|
||||
*/
|
||||
export const llamaComplete = async (params, controller, callback) => {
|
||||
for await (const chunk of llama(params.prompt, params, { controller })) {
|
||||
callback(chunk);
|
||||
}
|
||||
}
|
||||
|
||||
// Get the model info from the server. This is useful for getting the context window and so on.
|
||||
export const llamaModelInfo = async (config = {}) => {
|
||||
if (!generation_settings) {
|
||||
const api_url = config.api_url?.replace(/\/+$/, '') || "";
|
||||
const props = await fetch(`${api_url}/props`).then(r => r.json());
|
||||
generation_settings = props.default_generation_settings;
|
||||
}
|
||||
return generation_settings;
|
||||
}
|
||||
BIN
tools/server/public_legacy/favicon.ico
Normal file
|
After Width: | Height: | Size: 4.0 KiB |
1190
tools/server/public_legacy/index-new.html
Normal file
1301
tools/server/public_legacy/index.html
Normal file
1
tools/server/public_legacy/index.js
Normal file
856
tools/server/public_legacy/json-schema-to-grammar.mjs
Normal file
@@ -0,0 +1,856 @@
|
||||
// WARNING: This file was ported from json_schema_to_grammar.py, please fix bugs / add features there first.
|
||||
const SPACE_RULE = '| " " | "\\n"{1,2} [ \\t]{0,20}';
|
||||
|
||||
function _buildRepetition(itemRule, minItems, maxItems, opts={}) {
|
||||
if (maxItems == 0) {
|
||||
return '';
|
||||
}
|
||||
if (minItems === 0 && maxItems === 1) {
|
||||
return `${itemRule}?`;
|
||||
}
|
||||
|
||||
|
||||
const separatorRule = opts.separatorRule ?? '';
|
||||
const itemRuleIsLiteral = opts.itemRuleIsLiteral ?? false
|
||||
|
||||
if (separatorRule === '') {
|
||||
if (minItems === 1 && maxItems === undefined) {
|
||||
return `${itemRule}+`;
|
||||
} else if (minItems === 0 && maxItems === undefined) {
|
||||
return `${itemRule}*`;
|
||||
} else {
|
||||
return `${itemRule}{${minItems},${maxItems !== undefined ? maxItems : ''}}`;
|
||||
}
|
||||
}
|
||||
|
||||
const result = itemRule + ' ' + _buildRepetition(`(${separatorRule} ${itemRule})`, minItems > 0 ? minItems - 1 : 0, maxItems !== undefined ? maxItems - 1 : undefined);
|
||||
return minItems === 0 ? `(${result})?` : result;
|
||||
}
|
||||
|
||||
function _generateMinMaxInt(minValue, maxValue, out, decimalsLeft = 16, topLevel = true) {
|
||||
const hasMin = minValue !== null;
|
||||
const hasMax = maxValue !== null;
|
||||
|
||||
function digitRange(fromChar, toChar) {
|
||||
out.push("[");
|
||||
if (fromChar === toChar) {
|
||||
out.push(fromChar);
|
||||
} else {
|
||||
out.push(fromChar);
|
||||
out.push("-");
|
||||
out.push(toChar);
|
||||
}
|
||||
out.push("]");
|
||||
}
|
||||
|
||||
function moreDigits(minDigits, maxDigits) {
|
||||
out.push("[0-9]");
|
||||
if (minDigits === maxDigits && minDigits === 1) {
|
||||
return;
|
||||
}
|
||||
out.push("{");
|
||||
out.push(minDigits.toString());
|
||||
if (maxDigits !== minDigits) {
|
||||
out.push(",");
|
||||
if (maxDigits !== Number.MAX_SAFE_INTEGER) {
|
||||
out.push(maxDigits.toString());
|
||||
}
|
||||
}
|
||||
out.push("}");
|
||||
}
|
||||
|
||||
function uniformRange(fromStr, toStr) {
|
||||
let i = 0;
|
||||
while (i < fromStr.length && fromStr[i] === toStr[i]) {
|
||||
i++;
|
||||
}
|
||||
if (i > 0) {
|
||||
out.push("\"");
|
||||
out.push(fromStr.slice(0, i));
|
||||
out.push("\"");
|
||||
}
|
||||
if (i < fromStr.length) {
|
||||
if (i > 0) {
|
||||
out.push(" ");
|
||||
}
|
||||
const subLen = fromStr.length - i - 1;
|
||||
if (subLen > 0) {
|
||||
const fromSub = fromStr.slice(i + 1);
|
||||
const toSub = toStr.slice(i + 1);
|
||||
const subZeros = "0".repeat(subLen);
|
||||
const subNines = "9".repeat(subLen);
|
||||
|
||||
let toReached = false;
|
||||
out.push("(");
|
||||
if (fromSub === subZeros) {
|
||||
digitRange(fromStr[i], String.fromCharCode(toStr.charCodeAt(i) - 1));
|
||||
out.push(" ");
|
||||
moreDigits(subLen, subLen);
|
||||
} else {
|
||||
out.push("[");
|
||||
out.push(fromStr[i]);
|
||||
out.push("] ");
|
||||
out.push("(");
|
||||
uniformRange(fromSub, subNines);
|
||||
out.push(")");
|
||||
if (fromStr.charCodeAt(i) < toStr.charCodeAt(i) - 1) {
|
||||
out.push(" | ");
|
||||
if (toSub === subNines) {
|
||||
digitRange(String.fromCharCode(fromStr.charCodeAt(i) + 1), toStr[i]);
|
||||
toReached = true;
|
||||
} else {
|
||||
digitRange(String.fromCharCode(fromStr.charCodeAt(i) + 1), String.fromCharCode(toStr.charCodeAt(i) - 1));
|
||||
}
|
||||
out.push(" ");
|
||||
moreDigits(subLen, subLen);
|
||||
}
|
||||
}
|
||||
if (!toReached) {
|
||||
out.push(" | ");
|
||||
digitRange(toStr[i], toStr[i]);
|
||||
out.push(" ");
|
||||
uniformRange(subZeros, toSub);
|
||||
}
|
||||
out.push(")");
|
||||
} else {
|
||||
out.push("[");
|
||||
out.push(fromStr[i]);
|
||||
out.push("-");
|
||||
out.push(toStr[i]);
|
||||
out.push("]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (hasMin && hasMax) {
|
||||
if (minValue < 0 && maxValue < 0) {
|
||||
out.push("\"-\" (");
|
||||
_generateMinMaxInt(-maxValue, -minValue, out, decimalsLeft, true);
|
||||
out.push(")");
|
||||
return;
|
||||
}
|
||||
|
||||
if (minValue < 0) {
|
||||
out.push("\"-\" (");
|
||||
_generateMinMaxInt(0, -minValue, out, decimalsLeft, true);
|
||||
out.push(") | ");
|
||||
minValue = 0;
|
||||
}
|
||||
|
||||
let minS = minValue.toString();
|
||||
const maxS = maxValue.toString();
|
||||
const minDigits = minS.length;
|
||||
const maxDigits = maxS.length;
|
||||
|
||||
for (let digits = minDigits; digits < maxDigits; digits++) {
|
||||
uniformRange(minS, "9".repeat(digits));
|
||||
minS = "1" + "0".repeat(digits);
|
||||
out.push(" | ");
|
||||
}
|
||||
uniformRange(minS, maxS);
|
||||
return;
|
||||
}
|
||||
|
||||
const lessDecimals = Math.max(decimalsLeft - 1, 1);
|
||||
|
||||
if (hasMin) {
|
||||
if (minValue < 0) {
|
||||
out.push("\"-\" (");
|
||||
_generateMinMaxInt(null, -minValue, out, decimalsLeft, false);
|
||||
out.push(") | [0] | [1-9] ");
|
||||
moreDigits(0, decimalsLeft - 1);
|
||||
} else if (minValue === 0) {
|
||||
if (topLevel) {
|
||||
out.push("[0] | [1-9] ");
|
||||
moreDigits(0, lessDecimals);
|
||||
} else {
|
||||
moreDigits(1, decimalsLeft);
|
||||
}
|
||||
} else if (minValue <= 9) {
|
||||
const c = minValue.toString();
|
||||
const range_start = topLevel ? '1' : '0';
|
||||
if (c > range_start) {
|
||||
digitRange(range_start, String.fromCharCode(c.charCodeAt(0) - 1));
|
||||
out.push(" ");
|
||||
moreDigits(1, lessDecimals);
|
||||
out.push(" | ");
|
||||
}
|
||||
digitRange(c, "9");
|
||||
out.push(" ");
|
||||
moreDigits(0, lessDecimals);
|
||||
} else {
|
||||
const minS = minValue.toString();
|
||||
const length = minS.length;
|
||||
const c = minS[0];
|
||||
|
||||
if (c > "1") {
|
||||
digitRange(topLevel ? "1" : "0", String.fromCharCode(c.charCodeAt(0) - 1));
|
||||
out.push(" ");
|
||||
moreDigits(length, lessDecimals);
|
||||
out.push(" | ");
|
||||
}
|
||||
digitRange(c, c);
|
||||
out.push(" (");
|
||||
_generateMinMaxInt(parseInt(minS.slice(1)), null, out, lessDecimals, false);
|
||||
out.push(")");
|
||||
if (c < "9") {
|
||||
out.push(" | ");
|
||||
digitRange(String.fromCharCode(c.charCodeAt(0) + 1), "9");
|
||||
out.push(" ");
|
||||
moreDigits(length - 1, lessDecimals);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (hasMax) {
|
||||
if (maxValue >= 0) {
|
||||
if (topLevel) {
|
||||
out.push("\"-\" [1-9] ");
|
||||
moreDigits(0, lessDecimals);
|
||||
out.push(" | ");
|
||||
}
|
||||
_generateMinMaxInt(0, maxValue, out, decimalsLeft, true);
|
||||
} else {
|
||||
out.push("\"-\" (");
|
||||
_generateMinMaxInt(-maxValue, null, out, decimalsLeft, false);
|
||||
out.push(")");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
throw new Error("At least one of minValue or maxValue must be set");
|
||||
}
|
||||
|
||||
class BuiltinRule {
|
||||
constructor(content, deps) {
|
||||
this.content = content;
|
||||
this.deps = deps || [];
|
||||
}
|
||||
}
|
||||
|
||||
const PRIMITIVE_RULES = {
|
||||
boolean : new BuiltinRule('("true" | "false") space', []),
|
||||
'decimal-part' : new BuiltinRule('[0-9]{1,16}', []),
|
||||
'integral-part': new BuiltinRule('[0] | [1-9] [0-9]{0,15}', []),
|
||||
number : new BuiltinRule('("-"? integral-part) ("." decimal-part)? ([eE] [-+]? integral-part)? space', ['integral-part', 'decimal-part']),
|
||||
integer : new BuiltinRule('("-"? integral-part) space', ['integral-part']),
|
||||
value : new BuiltinRule('object | array | string | number | boolean | null', ['object', 'array', 'string', 'number', 'boolean', 'null']),
|
||||
object : new BuiltinRule('"{" space ( string ":" space value ("," space string ":" space value)* )? "}" space', ['string', 'value']),
|
||||
array : new BuiltinRule('"[" space ( value ("," space value)* )? "]" space', ['value']),
|
||||
uuid : new BuiltinRule('"\\"" [0-9a-fA-F]{8} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{12} "\\"" space', []),
|
||||
char : new BuiltinRule(`[^"\\\\\\x7F\\x00-\\x1F] | [\\\\] (["\\\\bfnrt] | "u" [0-9a-fA-F]{4})`, []),
|
||||
string : new BuiltinRule(`"\\"" char* "\\"" space`, ['char']),
|
||||
null : new BuiltinRule('"null" space', []),
|
||||
};
|
||||
|
||||
// TODO: support "uri", "email" string formats
|
||||
const STRING_FORMAT_RULES = {
|
||||
'date' : new BuiltinRule('[0-9]{4} "-" ( "0" [1-9] | "1" [0-2] ) "-" ( \"0\" [1-9] | [1-2] [0-9] | "3" [0-1] )', []),
|
||||
'time' : new BuiltinRule('([01] [0-9] | "2" [0-3]) ":" [0-5] [0-9] ":" [0-5] [0-9] ( "." [0-9]{3} )? ( "Z" | ( "+" | "-" ) ( [01] [0-9] | "2" [0-3] ) ":" [0-5] [0-9] )', []),
|
||||
'date-time' : new BuiltinRule('date "T" time', ['date', 'time']),
|
||||
'date-string' : new BuiltinRule('"\\"" date "\\"" space', ['date']),
|
||||
'time-string' : new BuiltinRule('"\\"" time "\\"" space', ['time']),
|
||||
'date-time-string': new BuiltinRule('"\\"" date-time "\\"" space', ['date-time']),
|
||||
}
|
||||
|
||||
const RESERVED_NAMES = {'root': true, ...PRIMITIVE_RULES, ...STRING_FORMAT_RULES};
|
||||
|
||||
const INVALID_RULE_CHARS_RE = /[^\dA-Za-z-]+/g;
|
||||
const GRAMMAR_LITERAL_ESCAPE_RE = /[\n\r"\\]/g;
|
||||
const GRAMMAR_RANGE_LITERAL_ESCAPE_RE = /[\n\r"\]\-\\]/g;
|
||||
const GRAMMAR_LITERAL_ESCAPES = { '\r': '\\r', '\n': '\\n', '"': '\\"', '-': '\\-', ']': '\\]', '\\': '\\\\' };
|
||||
|
||||
const NON_LITERAL_SET = new Set('|.()[]{}*+?');
|
||||
const ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = new Set('^$.[]()|{}*+?');
|
||||
|
||||
export class SchemaConverter {
|
||||
constructor(options) {
|
||||
this._propOrder = options.prop_order || {};
|
||||
this._allowFetch = options.allow_fetch || false;
|
||||
this._dotall = options.dotall || false;
|
||||
this._rules = {'space': SPACE_RULE};
|
||||
this._refs = {};
|
||||
this._refsBeingResolved = new Set();
|
||||
}
|
||||
|
||||
_formatLiteral(literal) {
|
||||
const escaped = literal.replace(
|
||||
GRAMMAR_LITERAL_ESCAPE_RE,
|
||||
m => GRAMMAR_LITERAL_ESCAPES[m]
|
||||
);
|
||||
return `"${escaped}"`;
|
||||
}
|
||||
|
||||
_formatRangeChar(literal) {
|
||||
return JSON.stringify(literal).slice(1, -1).replace(
|
||||
GRAMMAR_RANGE_LITERAL_ESCAPE_RE,
|
||||
m => GRAMMAR_LITERAL_ESCAPES[m]
|
||||
);
|
||||
}
|
||||
|
||||
_addRule(name, rule) {
|
||||
let escName = name.replace(INVALID_RULE_CHARS_RE, '-');
|
||||
let key = escName;
|
||||
|
||||
if (escName in this._rules) {
|
||||
if (this._rules[escName] === rule) {
|
||||
return key;
|
||||
}
|
||||
|
||||
let i = 0;
|
||||
while ((`${escName}${i}` in this._rules) && (this._rules[`${escName}${i}`] !== rule)) {
|
||||
i += 1;
|
||||
}
|
||||
key = `${escName}${i}`;
|
||||
}
|
||||
|
||||
this._rules[key] = rule;
|
||||
return key;
|
||||
}
|
||||
|
||||
async resolveRefs(schema, url) {
|
||||
const visit = async (n) => {
|
||||
if (Array.isArray(n)) {
|
||||
return Promise.all(n.map(visit));
|
||||
} else if (typeof n === 'object' && n !== null) {
|
||||
let ref = n.$ref;
|
||||
let target;
|
||||
if (ref !== undefined && !this._refs[ref]) {
|
||||
if (ref.startsWith('https://')) {
|
||||
if (!this._allowFetch) {
|
||||
throw new Error('Fetching remote schemas is not allowed (use --allow-fetch for force)');
|
||||
}
|
||||
const fetch = (await import('node-fetch')).default;
|
||||
|
||||
const fragSplit = ref.split('#');
|
||||
const baseUrl = fragSplit[0];
|
||||
|
||||
target = this._refs[baseUrl];
|
||||
if (!target) {
|
||||
target = await this.resolveRefs(await fetch(ref).then(res => res.json()), baseUrl);
|
||||
this._refs[baseUrl] = target;
|
||||
}
|
||||
|
||||
if (fragSplit.length === 1 || fragSplit[fragSplit.length - 1] === '') {
|
||||
return target;
|
||||
}
|
||||
} else if (ref.startsWith('#/')) {
|
||||
target = schema;
|
||||
ref = `${url}${ref}`;
|
||||
n.$ref = ref;
|
||||
} else {
|
||||
throw new Error(`Unsupported ref ${ref}`);
|
||||
}
|
||||
|
||||
const selectors = ref.split('#')[1].split('/').slice(1);
|
||||
for (const sel of selectors) {
|
||||
const selIndex = parseInt(sel, 10);
|
||||
if (target && sel in target) {
|
||||
target = target[sel];
|
||||
} else if (target && selIndex in target) {
|
||||
target = target[selIndex];
|
||||
} else {
|
||||
throw new Error(`Error resolving ref ${ref}: ${sel} not in ${JSON.stringify(target)}`);
|
||||
}
|
||||
}
|
||||
|
||||
this._refs[ref] = target;
|
||||
} else {
|
||||
await Promise.all(Object.values(n).map(visit));
|
||||
}
|
||||
}
|
||||
|
||||
return n;
|
||||
};
|
||||
|
||||
return visit(schema);
|
||||
}
|
||||
|
||||
_generateUnionRule(name, altSchemas) {
|
||||
return altSchemas
|
||||
.map((altSchema, i) => this.visit(altSchema, `${name ?? ''}${name ? '-' : 'alternative-'}${i}`))
|
||||
.join(' | ');
|
||||
}
|
||||
|
||||
_visitPattern(pattern, name) {
|
||||
if (!pattern.startsWith('^') || !pattern.endsWith('$')) {
|
||||
throw new Error('Pattern must start with "^" and end with "$"');
|
||||
}
|
||||
pattern = pattern.slice(1, -1);
|
||||
const subRuleIds = {};
|
||||
|
||||
let i = 0;
|
||||
const length = pattern.length;
|
||||
|
||||
const getDot = () => {
|
||||
let rule;
|
||||
if (this._dotall) {
|
||||
rule = '[\\U00000000-\\U0010FFFF]';
|
||||
} else {
|
||||
// Accept any character... except \n and \r line break chars (\x0A and \xOD)
|
||||
rule = '[^\\x0A\\x0D]';
|
||||
}
|
||||
return this._addRule('dot', rule);
|
||||
};
|
||||
|
||||
|
||||
const toRule = ([s, isLiteral]) => isLiteral ? "\"" + s + "\"" : s;
|
||||
|
||||
const transform = () => {
|
||||
const start = i;
|
||||
// For each component of this sequence, store its string representation and whether it's a literal.
|
||||
// We only need a flat structure here to apply repetition operators to the last item, and
|
||||
// to merge literals at the and (we're parsing grouped ( sequences ) recursively and don't treat '|' specially
|
||||
// (GBNF's syntax is luckily very close to regular expressions!)
|
||||
const seq = [];
|
||||
|
||||
const joinSeq = () => {
|
||||
const ret = [];
|
||||
for (const [isLiteral, g] of groupBy(seq, x => x[1])) {
|
||||
if (isLiteral) {
|
||||
ret.push([[...g].map(x => x[0]).join(''), true]);
|
||||
} else {
|
||||
ret.push(...g);
|
||||
}
|
||||
}
|
||||
if (ret.length === 1) {
|
||||
return ret[0];
|
||||
}
|
||||
return [ret.map(x => toRule(x)).join(' '), false];
|
||||
};
|
||||
|
||||
while (i < length) {
|
||||
const c = pattern[i];
|
||||
if (c === '.') {
|
||||
seq.push([getDot(), false]);
|
||||
i += 1;
|
||||
} else if (c === '(') {
|
||||
i += 1;
|
||||
if (i < length) {
|
||||
if (pattern[i] === '?') {
|
||||
throw new Error(`Unsupported pattern syntax "${pattern[i]}" at index ${i} of /${pattern}/`);
|
||||
}
|
||||
}
|
||||
seq.push([`(${toRule(transform())})`, false]);
|
||||
} else if (c === ')') {
|
||||
i += 1;
|
||||
if (start <= 0 || pattern[start - 1] !== '(') {
|
||||
throw new Error(`Unbalanced parentheses; start = ${start}, i = ${i}, pattern = ${pattern}`);
|
||||
}
|
||||
return joinSeq();
|
||||
} else if (c === '[') {
|
||||
let squareBrackets = c;
|
||||
i += 1;
|
||||
while (i < length && pattern[i] !== ']') {
|
||||
if (pattern[i] === '\\') {
|
||||
squareBrackets += pattern.slice(i, i + 2);
|
||||
i += 2;
|
||||
} else {
|
||||
squareBrackets += pattern[i];
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
if (i >= length) {
|
||||
throw new Error(`Unbalanced square brackets; start = ${start}, i = ${i}, pattern = ${pattern}`);
|
||||
}
|
||||
squareBrackets += ']';
|
||||
i += 1;
|
||||
seq.push([squareBrackets, false]);
|
||||
} else if (c === '|') {
|
||||
seq.push(['|', false]);
|
||||
i += 1;
|
||||
} else if (c === '*' || c === '+' || c === '?') {
|
||||
seq[seq.length - 1] = [toRule(seq[seq.length - 1]) + c, false];
|
||||
i += 1;
|
||||
} else if (c === '{') {
|
||||
let curlyBrackets = c;
|
||||
i += 1;
|
||||
while (i < length && pattern[i] !== '}') {
|
||||
curlyBrackets += pattern[i];
|
||||
i += 1;
|
||||
}
|
||||
if (i >= length) {
|
||||
throw new Error(`Unbalanced curly brackets; start = ${start}, i = ${i}, pattern = ${pattern}`);
|
||||
}
|
||||
curlyBrackets += '}';
|
||||
i += 1;
|
||||
const nums = curlyBrackets.slice(1, -1).split(',').map(s => s.trim());
|
||||
let minTimes, maxTimes;
|
||||
if (nums.length === 1) {
|
||||
minTimes = parseInt(nums[0], 10);
|
||||
maxTimes = minTimes;
|
||||
} else {
|
||||
if (nums.length !== 2) {
|
||||
throw new Error(`Invalid quantifier ${curlyBrackets}`);
|
||||
}
|
||||
minTimes = nums[0] ? parseInt(nums[0], 10) : 0;
|
||||
maxTimes = nums[1] ? parseInt(nums[1], 10) : Infinity;
|
||||
}
|
||||
|
||||
let [sub, subIsLiteral] = seq[seq.length - 1];
|
||||
|
||||
if (!subIsLiteral) {
|
||||
let id = subRuleIds[sub];
|
||||
if (id === undefined) {
|
||||
id = this._addRule(`${name}-${Object.keys(subRuleIds).length + 1}`, sub);
|
||||
subRuleIds[sub] = id;
|
||||
}
|
||||
sub = id;
|
||||
}
|
||||
|
||||
seq[seq.length - 1] = [
|
||||
_buildRepetition(subIsLiteral ? `"${sub}"` : sub, minTimes, maxTimes, {itemRuleIsLiteral: subIsLiteral}),
|
||||
false
|
||||
];
|
||||
} else {
|
||||
let literal = '';
|
||||
while (i < length) {
|
||||
if (pattern[i] === '\\' && i < length - 1) {
|
||||
const next = pattern[i + 1];
|
||||
if (ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS.has(next)) {
|
||||
i += 1;
|
||||
literal += pattern[i];
|
||||
i += 1;
|
||||
} else {
|
||||
literal += pattern.slice(i, i + 2);
|
||||
i += 2;
|
||||
}
|
||||
} else if (pattern[i] === '"') {
|
||||
literal += '\\"';
|
||||
i += 1;
|
||||
} else if (!NON_LITERAL_SET.has(pattern[i]) &&
|
||||
(i === length - 1 || literal === '' || pattern[i + 1] === '.' || !NON_LITERAL_SET.has(pattern[i+1]))) {
|
||||
literal += pattern[i];
|
||||
i += 1;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (literal !== '') {
|
||||
seq.push([literal, true]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return joinSeq();
|
||||
};
|
||||
|
||||
return this._addRule(name, "\"\\\"\" (" + toRule(transform()) + ") \"\\\"\" space")
|
||||
}
|
||||
|
||||
_notStrings(strings) {
|
||||
class TrieNode {
|
||||
constructor() {
|
||||
this.children = {};
|
||||
this.isEndOfString = false;
|
||||
}
|
||||
|
||||
insert(str) {
|
||||
let node = this;
|
||||
for (const c of str) {
|
||||
node = node.children[c] = node.children[c] || new TrieNode();
|
||||
}
|
||||
node.isEndOfString = true;
|
||||
}
|
||||
}
|
||||
|
||||
const trie = new TrieNode();
|
||||
for (const s of strings) {
|
||||
trie.insert(s);
|
||||
}
|
||||
|
||||
const charRuleName = this._addPrimitive('char', PRIMITIVE_RULES['char']);
|
||||
const out = ['["] ( '];
|
||||
|
||||
const visit = (node) => {
|
||||
const rejects = [];
|
||||
let first = true;
|
||||
for (const c of Object.keys(node.children).sort()) {
|
||||
const child = node.children[c];
|
||||
rejects.push(c);
|
||||
if (first) {
|
||||
first = false;
|
||||
} else {
|
||||
out.push(' | ');
|
||||
}
|
||||
out.push(`[${c}]`);
|
||||
if (Object.keys(child.children).length > 0) {
|
||||
out.push(' (');
|
||||
visit(child);
|
||||
out.push(')');
|
||||
} else if (child.isEndOfString) {
|
||||
out.push(` ${charRuleName}+`);
|
||||
}
|
||||
}
|
||||
if (Object.keys(node.children).length > 0) {
|
||||
if (!first) {
|
||||
out.push(' | ');
|
||||
}
|
||||
out.push(`[^"${rejects.join('')}] ${charRuleName}*`);
|
||||
}
|
||||
};
|
||||
|
||||
visit(trie);
|
||||
|
||||
out.push(` )${trie.isEndOfString ? '' : '?'} ["] space`);
|
||||
return out.join('');
|
||||
}
|
||||
|
||||
_resolveRef(ref) {
|
||||
let refFragment = ref.split('#').pop();
|
||||
let refName = 'ref' + refFragment.replace(/[^a-zA-Z0-9-]+/g, '-');
|
||||
if (!(refName in this._rules) && !this._refsBeingResolved.has(ref)) {
|
||||
this._refsBeingResolved.add(ref);
|
||||
const resolved = this._refs[ref];
|
||||
refName = this.visit(resolved, refName);
|
||||
this._refsBeingResolved.delete(ref);
|
||||
}
|
||||
return refName;
|
||||
}
|
||||
|
||||
_generateConstantRule(value) {
|
||||
return this._formatLiteral(JSON.stringify(value));
|
||||
}
|
||||
|
||||
visit(schema, name) {
|
||||
const schemaType = schema.type;
|
||||
const schemaFormat = schema.format;
|
||||
const ruleName = name in RESERVED_NAMES ? name + '-' : name == '' ? 'root' : name;
|
||||
|
||||
const ref = schema.$ref;
|
||||
if (ref !== undefined) {
|
||||
return this._addRule(ruleName, this._resolveRef(ref));
|
||||
} else if (schema.oneOf || schema.anyOf) {
|
||||
return this._addRule(ruleName, this._generateUnionRule(name, schema.oneOf || schema.anyOf));
|
||||
} else if (Array.isArray(schemaType)) {
|
||||
return this._addRule(ruleName, this._generateUnionRule(name, schemaType.map(t => ({...schema, type: t}))));
|
||||
} else if ('const' in schema) {
|
||||
return this._addRule(ruleName, this._generateConstantRule(schema.const) + ' space');
|
||||
} else if ('enum' in schema) {
|
||||
const rule = '(' + schema.enum.map(v => this._generateConstantRule(v)).join(' | ') + ') space';
|
||||
return this._addRule(ruleName, rule);
|
||||
} else if ((schemaType === undefined || schemaType === 'object') &&
|
||||
('properties' in schema ||
|
||||
('additionalProperties' in schema && schema.additionalProperties !== true))) {
|
||||
const required = new Set(schema.required || []);
|
||||
const properties = Object.entries(schema.properties ?? {});
|
||||
return this._addRule(ruleName, this._buildObjectRule(properties, required, name, schema.additionalProperties));
|
||||
} else if ((schemaType === undefined || schemaType === 'object' || schemaType === 'string') && 'allOf' in schema) {
|
||||
const required = new Set();
|
||||
const properties = [];
|
||||
const enumSets = [];
|
||||
const addComponent = (compSchema, isRequired) => {
|
||||
const ref = compSchema.$ref;
|
||||
if (ref !== undefined) {
|
||||
compSchema = this._refs[ref];
|
||||
}
|
||||
|
||||
if ('properties' in compSchema) {
|
||||
for (const [propName, propSchema] of Object.entries(compSchema.properties)) {
|
||||
properties.push([propName, propSchema]);
|
||||
if (isRequired) {
|
||||
required.add(propName);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ('enum' in compSchema) {
|
||||
enumSets.push(new Set(compSchema.enum || []));
|
||||
}
|
||||
};
|
||||
|
||||
for (const t of schema.allOf) {
|
||||
if ('anyOf' in t) {
|
||||
for (const tt of t.anyOf) {
|
||||
addComponent(tt, false);
|
||||
}
|
||||
} else {
|
||||
addComponent(t, true);
|
||||
}
|
||||
}
|
||||
|
||||
if (enumSets.length > 0) {
|
||||
const enumIntersection = new Set([...enumSets[0]].filter(v => enumSets.every(s => s.has(v))));
|
||||
if (enumIntersection.size > 0) {
|
||||
const sortedEnums = [...enumIntersection].sort((a, b) => a.localeCompare(b));
|
||||
const rule = '(' + sortedEnums.map(v => this._generateConstantRule(v)).join(' | ') + ') space';
|
||||
return this._addRule(ruleName, rule);
|
||||
}
|
||||
}
|
||||
return this._addRule(ruleName, this._buildObjectRule(properties, required, name, null));
|
||||
} else if ((schemaType === undefined || schemaType === 'array') && ('items' in schema || 'prefixItems' in schema)) {
|
||||
const items = schema.items ?? schema.prefixItems;
|
||||
if (Array.isArray(items)) {
|
||||
return this._addRule(
|
||||
ruleName,
|
||||
'"[" space ' +
|
||||
items.map((item, i) => this.visit(item, `${name ?? ''}${name ? '-' : ''}tuple-${i}`)).join(' "," space ') +
|
||||
' "]" space'
|
||||
);
|
||||
} else {
|
||||
const itemRuleName = this.visit(items, `${name ?? ''}${name ? '-' : ''}item`);
|
||||
const minItems = schema.minItems || 0;
|
||||
const maxItems = schema.maxItems;
|
||||
return this._addRule(ruleName, '"[" space ' + _buildRepetition(itemRuleName, minItems, maxItems, {separatorRule: '"," space'}) + ' "]" space');
|
||||
}
|
||||
} else if ((schemaType === undefined || schemaType === 'string') && 'pattern' in schema) {
|
||||
return this._visitPattern(schema.pattern, ruleName);
|
||||
} else if ((schemaType === undefined || schemaType === 'string') && /^uuid[1-5]?$/.test(schema.format || '')) {
|
||||
return this._addPrimitive(
|
||||
ruleName === 'root' ? 'root' : schemaFormat,
|
||||
PRIMITIVE_RULES['uuid']
|
||||
);
|
||||
} else if ((schemaType === undefined || schemaType === 'string') && `${schema.format}-string` in STRING_FORMAT_RULES) {
|
||||
const primName = `${schema.format}-string`
|
||||
return this._addRule(ruleName, this._addPrimitive(primName, STRING_FORMAT_RULES[primName]));
|
||||
} else if (schemaType === 'string' && ('minLength' in schema || 'maxLength' in schema)) {
|
||||
const charRuleName = this._addPrimitive('char', PRIMITIVE_RULES['char']);
|
||||
const minLen = schema.minLength || 0;
|
||||
const maxLen = schema.maxLength;
|
||||
return this._addRule(ruleName, '"\\\"" ' + _buildRepetition(charRuleName, minLen, maxLen) + ' "\\\"" space');
|
||||
} else if (schemaType === 'integer' && ('minimum' in schema || 'exclusiveMinimum' in schema || 'maximum' in schema || 'exclusiveMaximum' in schema)) {
|
||||
let minValue = null;
|
||||
let maxValue = null;
|
||||
if ('minimum' in schema) {
|
||||
minValue = schema.minimum;
|
||||
} else if ('exclusiveMinimum' in schema) {
|
||||
minValue = schema.exclusiveMinimum + 1;
|
||||
}
|
||||
if ('maximum' in schema) {
|
||||
maxValue = schema.maximum;
|
||||
} else if ('exclusiveMaximum' in schema) {
|
||||
maxValue = schema.exclusiveMaximum - 1;
|
||||
}
|
||||
|
||||
const out = ["("];
|
||||
_generateMinMaxInt(minValue, maxValue, out);
|
||||
out.push(") space");
|
||||
return this._addRule(ruleName, out.join(''));
|
||||
} else if ((schemaType === 'object') || (Object.keys(schema).length === 0)) {
|
||||
return this._addRule(ruleName, this._addPrimitive('object', PRIMITIVE_RULES['object']));
|
||||
} else {
|
||||
if (!(schemaType in PRIMITIVE_RULES)) {
|
||||
throw new Error(`Unrecognized schema: ${JSON.stringify(schema)}`);
|
||||
}
|
||||
// TODO: support minimum, maximum, exclusiveMinimum, exclusiveMaximum at least for zero
|
||||
return this._addPrimitive(ruleName === 'root' ? 'root' : schemaType, PRIMITIVE_RULES[schemaType]);
|
||||
}
|
||||
}
|
||||
|
||||
_addPrimitive(name, rule) {
|
||||
let n = this._addRule(name, rule.content);
|
||||
for (const dep of rule.deps) {
|
||||
const depRule = PRIMITIVE_RULES[dep] || STRING_FORMAT_RULES[dep];
|
||||
if (!depRule) {
|
||||
throw new Error(`Rule ${dep} not known`);
|
||||
}
|
||||
if (!(dep in this._rules)) {
|
||||
this._addPrimitive(dep, depRule);
|
||||
}
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
_buildObjectRule(properties, required, name, additionalProperties) {
|
||||
const propOrder = this._propOrder;
|
||||
// sort by position in prop_order (if specified) then by original order
|
||||
const sortedProps = properties.map(([k]) => k).sort((a, b) => {
|
||||
const orderA = propOrder[a] || Infinity;
|
||||
const orderB = propOrder[b] || Infinity;
|
||||
return orderA - orderB || properties.findIndex(([k]) => k === a) - properties.findIndex(([k]) => k === b);
|
||||
});
|
||||
|
||||
const propKvRuleNames = {};
|
||||
for (const [propName, propSchema] of properties) {
|
||||
const propRuleName = this.visit(propSchema, `${name ?? ''}${name ? '-' : ''}${propName}`);
|
||||
propKvRuleNames[propName] = this._addRule(
|
||||
`${name ?? ''}${name ? '-' : ''}${propName}-kv`,
|
||||
`${this._formatLiteral(JSON.stringify(propName))} space ":" space ${propRuleName}`
|
||||
);
|
||||
}
|
||||
const requiredProps = sortedProps.filter(k => required.has(k));
|
||||
const optionalProps = sortedProps.filter(k => !required.has(k));
|
||||
|
||||
if (additionalProperties) {
|
||||
const subName = `${name ?? ''}${name ? '-' : ''}additional`;
|
||||
const valueRule =
|
||||
additionalProperties != null && typeof additionalProperties === 'object' ? this.visit(additionalProperties, `${subName}-value`)
|
||||
: this._addPrimitive('value', PRIMITIVE_RULES['value']);
|
||||
|
||||
const key_rule =
|
||||
sortedProps.length === 0 ? this._addPrimitive('string', PRIMITIVE_RULES['string'])
|
||||
: this._addRule(`${subName}-k`, this._notStrings(sortedProps));
|
||||
|
||||
propKvRuleNames['*'] = this._addRule(
|
||||
`${subName}-kv`,
|
||||
`${key_rule} ":" space ${valueRule}`);
|
||||
optionalProps.push('*');
|
||||
}
|
||||
|
||||
let rule = '"{" space ';
|
||||
rule += requiredProps.map(k => propKvRuleNames[k]).join(' "," space ');
|
||||
|
||||
if (optionalProps.length > 0) {
|
||||
rule += ' (';
|
||||
if (requiredProps.length > 0) {
|
||||
rule += ' "," space ( ';
|
||||
}
|
||||
|
||||
const getRecursiveRefs = (ks, firstIsOptional) => {
|
||||
const [k, ...rest] = ks;
|
||||
const kvRuleName = propKvRuleNames[k];
|
||||
let res;
|
||||
const commaRef = `( "," space ${kvRuleName} )`;
|
||||
if (firstIsOptional) {
|
||||
res = commaRef + (k === '*' ? '*' : '?');
|
||||
} else {
|
||||
res = kvRuleName + (k === '*' ? ' ' + commaRef + '*' : '');
|
||||
}
|
||||
if (rest.length > 0) {
|
||||
res += ' ' + this._addRule(
|
||||
`${name ?? ''}${name ? '-' : ''}${k}-rest`,
|
||||
getRecursiveRefs(rest, true)
|
||||
);
|
||||
}
|
||||
return res;
|
||||
};
|
||||
|
||||
rule += optionalProps.map((_, i) => getRecursiveRefs(optionalProps.slice(i), false)).join(' | ');
|
||||
if (requiredProps.length > 0) {
|
||||
rule += ' )';
|
||||
}
|
||||
rule += ' )?';
|
||||
}
|
||||
|
||||
rule += ' "}" space';
|
||||
|
||||
return rule;
|
||||
}
|
||||
|
||||
formatGrammar() {
|
||||
let grammar = '';
|
||||
for (const [name, rule] of Object.entries(this._rules).sort(([a], [b]) => a.localeCompare(b))) {
|
||||
grammar += `${name} ::= ${rule}\n`;
|
||||
}
|
||||
return grammar;
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to group elements by a key function
|
||||
function* groupBy(iterable, keyFn) {
|
||||
let lastKey = null;
|
||||
let group = [];
|
||||
for (const element of iterable) {
|
||||
const key = keyFn(element);
|
||||
if (lastKey !== null && key !== lastKey) {
|
||||
yield [lastKey, group];
|
||||
group = [];
|
||||
}
|
||||
group.push(element);
|
||||
lastKey = key;
|
||||
}
|
||||
if (group.length > 0) {
|
||||
yield [lastKey, group];
|
||||
}
|
||||
}
|
||||
12
tools/server/public_legacy/loading.html
Normal file
@@ -0,0 +1,12 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="refresh" content="5">
|
||||
</head>
|
||||
<body>
|
||||
<div id="loading">
|
||||
The model is loading. Please wait.<br/>
|
||||
The user interface will appear soon.
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
331
tools/server/public_legacy/prompt-formats.js
Normal file
@@ -0,0 +1,331 @@
|
||||
// extended list
|
||||
export const promptFormats = {
|
||||
"alpaca": {
|
||||
template: `{{prompt}}\n\n{{history}}\n\n{{char}}:`,
|
||||
|
||||
historyTemplate: `### {{name}}:\n{{message}}`,
|
||||
|
||||
char: "Response",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "Instruction",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"chatml": {
|
||||
template: `<|im_start|>system\n{{prompt}}<|im_end|>\n{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `<|im_start|>{{name}}\n{{message}}`,
|
||||
|
||||
char: "assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "user",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "<|im_end|>\n",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"commandr": {
|
||||
template: `<BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{prompt}}\n<|END_OF_TURN_TOKEN|>{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `<|START_OF_TURN_TOKEN|><|{{name}}|> {{message}}`,
|
||||
|
||||
char: "CHATBOT_TOKEN",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "USER_TOKEN",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "<|END_OF_TURN_TOKEN|>",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
// ref: https://docs.cohere.com/docs/prompting-command-r
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"llama2": {
|
||||
template: `<s>[INST] <<SYS>>\n{{prompt}}\n<</SYS>>\n\nTest Message [/INST] Test Successfull </s>{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `{{name}}: {{message}}`,
|
||||
|
||||
char: "Assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "</s>",
|
||||
|
||||
user: "User",
|
||||
userMsgPrefix: "<s>[INST] ",
|
||||
userMsgSuffix: " [/INST]",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
// ref: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"llama3": {
|
||||
template: `<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{prompt}}{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `<|start_header_id|>{{name}}<|end_header_id|>\n\n{{message}}<|eot_id|>`,
|
||||
|
||||
char: "assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "user",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: "<|eot_id|>"
|
||||
},
|
||||
// ref: https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3/#special-tokens-used-with-meta-llama-3
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"openchat": {
|
||||
template: `{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `GPT4 Correct {{name}}: {{message}}<|end_of_turn|>`,
|
||||
|
||||
char: "Assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "User",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"phi3": {
|
||||
template: `{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `<|{{name}}|>\n{{message}}<|end|>\n`,
|
||||
|
||||
char: "assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "user",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: "<|end|>"
|
||||
},
|
||||
// ref: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct#chat-format
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"vicuna": {
|
||||
template: `{{prompt}}\n{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `{{name}}: {{message}}\n`,
|
||||
|
||||
char: "ASSISTANT",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "USER",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
// ref: https://huggingface.co/lmsys/vicuna-33b-v1.3/discussions/1
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"deepseekCoder": {
|
||||
template: `{{prompt}}{{history}}{{char}}:`,
|
||||
|
||||
historyTemplate: `### {{name}}:\n{{message}}`,
|
||||
|
||||
char: "Response",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "Instruction",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: "<|EOT|>"
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"med42": {
|
||||
template: `<|system|>: {{prompt}}\n{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `<|{{name}}|>: {{message}}\n`,
|
||||
|
||||
char: "assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "prompter",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"neuralchat": {
|
||||
template: `### System:\n{{prompt}}\n{{history}}{{char}}:`,
|
||||
|
||||
historyTemplate: `### {{name}}:\n{{message}}\n`,
|
||||
|
||||
char: "Assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "User",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"nousHermes": {
|
||||
template: `### Instruction: {{prompt}}\n\n{{history}}\n\n{{char}}:`,
|
||||
|
||||
historyTemplate: `### {{name}}:\n{{message}}`,
|
||||
|
||||
char: "Response",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "Input",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"openchatMath": {
|
||||
template: `{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `Math Correct {{name}}: {{message}}<|end_of_turn|>`,
|
||||
|
||||
char: "Assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
|
||||
user: "User",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"orion": {
|
||||
template: `<s>Human: Test Message\n\nAssistant: </s>Test Successful</s>{{history}}{{char}}:`,
|
||||
|
||||
historyTemplate: `{{name}}: {{message}}`,
|
||||
|
||||
char: "Assistant </s>",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "Human",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "\n\n",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"sauerkraut": {
|
||||
template: `{{prompt}}\n{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `
|
||||
{{name}}: {{message}}\n`,
|
||||
|
||||
char: "Assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "User",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"starlingCode": {
|
||||
template: `{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `Code {{name}}: {{message}}<|end_of_turn|>`,
|
||||
|
||||
char: "Assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "User",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"yi34b": {
|
||||
template: `{{history}} {{char}}`,
|
||||
|
||||
historyTemplate: `{{name}}: {{message}}`,
|
||||
|
||||
char: "Assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "Human",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
},
|
||||
|
||||
// ----------------------------
|
||||
|
||||
"zephyr": {
|
||||
template: `<|system|>\n{{prompt}}</s>\n{{history}}{{char}}`,
|
||||
|
||||
historyTemplate: `<|{{name}}|>\n{{message}}</s>\n`,
|
||||
|
||||
char: "assistant",
|
||||
charMsgPrefix: "",
|
||||
charMsgSuffix: "",
|
||||
|
||||
user: "user",
|
||||
userMsgPrefix: "",
|
||||
userMsgSuffix: "",
|
||||
|
||||
stops: ""
|
||||
}
|
||||
};
|
||||
954
tools/server/public_legacy/style.css
Normal file
@@ -0,0 +1,954 @@
|
||||
@import url("colorthemes.css");
|
||||
|
||||
body {
|
||||
font-family: 'Arial', sans-serif;
|
||||
font-size: 90%;
|
||||
background-color: var(--background-color-1);
|
||||
color: var(--text-color-subtile-1); /* head 1 llama.cpp & triangle options for some reason */
|
||||
max-width: 600px;
|
||||
min-width: 300px;
|
||||
line-height: 1.2;
|
||||
margin: 0 auto;
|
||||
padding: 0 0.5em;
|
||||
transition: background-color 0.3s;
|
||||
}
|
||||
|
||||
::selection {
|
||||
color: var(--button-primary-text) ;
|
||||
background: var(--button-primary-color);
|
||||
}
|
||||
|
||||
code, pre code {
|
||||
font-family: 'Courier New', monospace;
|
||||
}
|
||||
|
||||
#container {
|
||||
margin: 0em auto;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
justify-content: space-between;
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
main {
|
||||
margin: 3px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
justify-content: space-between;
|
||||
gap: 1em;
|
||||
flex-grow: 1;
|
||||
overflow-y: auto;
|
||||
border: 1px solid var(--border-color-3);
|
||||
border-radius: 5px;
|
||||
padding: 0.5em;
|
||||
}
|
||||
|
||||
p {
|
||||
overflow-wrap: break-word;
|
||||
word-wrap: break-word;
|
||||
hyphens: auto;
|
||||
margin-top: 0.5em;
|
||||
margin-bottom: 0.5em;
|
||||
}
|
||||
|
||||
#write form {
|
||||
margin: 1em 0 0 0;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.5em;
|
||||
align-items: stretch;
|
||||
}
|
||||
|
||||
.right {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
gap: 0.5em;
|
||||
justify-content: flex-end;
|
||||
margin-bottom: 30px;
|
||||
}
|
||||
|
||||
.two-columns {
|
||||
width: 97%;
|
||||
max-width: 97%;
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1fr;
|
||||
gap: 1em;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.json-schema-controls {
|
||||
margin-top: 10px;
|
||||
width: 100%;
|
||||
max-width: 100%;
|
||||
display: grid;
|
||||
grid-template: "a a";
|
||||
gap: 1em;
|
||||
font-size: x-small;
|
||||
color: var(--theme-nuance-color-3);
|
||||
padding-top: 16px;
|
||||
padding-bottom: 16px;
|
||||
text-transform: uppercase;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.json-schema-controls > * {
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
/* titles of the details-summary boxes */
|
||||
.summary-title {
|
||||
font-weight: 600;
|
||||
font-size: x-small;
|
||||
color: var(--text-color-subtile-1);
|
||||
text-transform: uppercase;
|
||||
/* transition: ; */
|
||||
}
|
||||
|
||||
fieldset {
|
||||
border: none;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
color: var(--text-color-plain);
|
||||
}
|
||||
|
||||
fieldset.two {
|
||||
display: grid;
|
||||
grid-template: "a a a";
|
||||
gap: 1em;
|
||||
align-items: center;
|
||||
font-size: x-small;
|
||||
color: var(--text-color-plain);
|
||||
}
|
||||
|
||||
fieldset.three {
|
||||
display: grid;
|
||||
grid-template: "a a a";
|
||||
gap: 1em;
|
||||
font-size: x-small;
|
||||
color: var(--text-color-plain);
|
||||
}
|
||||
|
||||
/* titles of name fields*/
|
||||
fieldset.names {
|
||||
display: grid;
|
||||
grid-template: "a a";
|
||||
gap: 1em;
|
||||
font-size: x-small;
|
||||
color: var(--theme-nuance-color-3);
|
||||
padding-top: 16px;
|
||||
padding-bottom: 16px;
|
||||
text-transform: uppercase;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
/* titles of params fields*/
|
||||
fieldset.params {
|
||||
display: grid;
|
||||
grid-template: "a a";
|
||||
gap: 1em;
|
||||
font-size: x-small;
|
||||
color: var(--theme-nuance-color-4);
|
||||
padding-top: 16px;
|
||||
padding-bottom: 16px;
|
||||
text-transform: uppercase;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
fieldset.dropdowns {
|
||||
-webkit-appearance: none;
|
||||
display: flex;
|
||||
grid-template: "a a";
|
||||
gap: 1em;
|
||||
font-size: x-small;
|
||||
color: red;
|
||||
padding-top: 16px;
|
||||
padding-bottom: 16px;
|
||||
text-transform: uppercase;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
/* input of name fields*/
|
||||
.names input[type="text"] {
|
||||
font-family: Arial, sans-serif;
|
||||
font-size: medium;
|
||||
font-weight: 500;
|
||||
padding: 5px;
|
||||
border: 1px solid var(--border-color-2);
|
||||
}
|
||||
|
||||
.chat-id-color {
|
||||
color: var(--chat-id-color);
|
||||
}
|
||||
|
||||
details {
|
||||
border: 1px solid var(--border-color-2);
|
||||
border-radius: 5px;
|
||||
padding: 0.5em 0.5em 0;
|
||||
margin-top: 0.5em;
|
||||
}
|
||||
|
||||
summary {
|
||||
font-weight: bold;
|
||||
margin: -0.5em -0.5em 0;
|
||||
padding: 0.5em;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
details[open] {
|
||||
padding: 0.5em;
|
||||
}
|
||||
|
||||
textarea-sec, input-sec, button-sec {
|
||||
padding: 10px;
|
||||
height: 40px;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
textarea-sec::placeholder, input-sec::placeholder {
|
||||
padding-left: 10px;
|
||||
}
|
||||
|
||||
.toggleCheckbox {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.toggleContainer {
|
||||
position: relative;
|
||||
display: grid;
|
||||
grid-template-columns: repeat(2, 1fr);
|
||||
width: fit-content;
|
||||
border: 3px solid var(--border-color-2);
|
||||
border-radius: 20px;
|
||||
background: var(--border-color-2);
|
||||
font-size: small;
|
||||
cursor: pointer;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
/* toggle button current state */
|
||||
.toggleContainer::before {
|
||||
color: var(--button-primary-text);
|
||||
background-color: var(--button-primary-color);
|
||||
content: '';
|
||||
position: absolute;
|
||||
width: 50%;
|
||||
height: 100%;
|
||||
left: 0%;
|
||||
border-radius: 20px;
|
||||
transition: all 0.3s;
|
||||
}
|
||||
|
||||
.toggleContainer div {
|
||||
padding: 6px;
|
||||
text-align: center;
|
||||
z-index: 1;
|
||||
transition: color 0.3s;
|
||||
}
|
||||
|
||||
.toggleCheckbox:checked + .toggleContainer::before {
|
||||
left: 50%;
|
||||
}
|
||||
|
||||
.toggleCheckbox:checked + .toggleContainer div:first-child {
|
||||
color: var(--text-color-subtile-2);
|
||||
}
|
||||
|
||||
.toggleCheckbox:checked + .toggleContainer div:last-child {
|
||||
color: var(--button-primary-text);
|
||||
}
|
||||
|
||||
.toggleCheckbox + .toggleContainer div:first-child {
|
||||
color: var(--button-primary-text);
|
||||
}
|
||||
|
||||
.toggleCheckbox + .toggleContainer div:last-child {
|
||||
color: var(--text-color-subtile-2);
|
||||
}
|
||||
|
||||
select {
|
||||
padding: 5px;
|
||||
margin-right: 5px;
|
||||
border-radius: 4px;
|
||||
border: 1px solid var(--secondary-color-4);
|
||||
background-color: var(--primary-color-3);
|
||||
color: var(--secondary-color-4);
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
select:focus {
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 1px var(--border-focus-shadow);
|
||||
}
|
||||
|
||||
.button-container {
|
||||
display: flex;
|
||||
justify-content: flex-end;
|
||||
}
|
||||
|
||||
button {
|
||||
color: var(--button-primary-text);
|
||||
background-color: var(--button-primary-color);
|
||||
border: 1px solid var(--button-primary-border);
|
||||
transition: background-color 0.1s;
|
||||
border-radius: 12px;
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
text-shadow: 0px 0px 30px #ffffff;
|
||||
text-align: center;
|
||||
text-decoration: none;
|
||||
margin: 4px 2px;
|
||||
padding: 10px 20px;
|
||||
display: inline-block;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
button:hover {
|
||||
color: var(--button-primary-text-hover);
|
||||
background-color: var(--button-primary-color-hover);
|
||||
border: 1px solid var(--button-primary-border-hover);
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
button:active {
|
||||
color: var(--button-primary-text-active);
|
||||
background-color: var(--button-primary-color-active);
|
||||
border: 1px solid var(--button-primary-border-active);
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
button:disabled {
|
||||
color: var(--button-tertiary-text);
|
||||
background-color: var(--button-tertiary-color);
|
||||
border: 1px solid var(--button-tertiary-border);
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
cursor: not-allowed;
|
||||
}
|
||||
|
||||
.reset-button {
|
||||
background-color: var(--button-secondary-color);
|
||||
border: 1px solid var(--button-secondary-color);
|
||||
color: var(--button-secondary-text);
|
||||
width: fit-content;
|
||||
height: fit-content;
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
border-radius: 50px;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.reset-button:hover {
|
||||
color: var(--button-alert-text-hover);
|
||||
background-color: var(--button-alert-color-hover);
|
||||
border: 1px solid var(--button-alert-border-hover);
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.reset-button:active {
|
||||
color: var(--button-alert-text-active);
|
||||
background-color: var(--button-alert-color-active);
|
||||
border: 1px solid var(--button-alert-border-active);
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.button-grammar {
|
||||
color: var(--button-primary-text);
|
||||
background-color: var(--button-primary-color);
|
||||
border: 1px solid var(--button-primary-border);
|
||||
border-radius: 10px;
|
||||
padding: 10px 20px;
|
||||
text-align: center;
|
||||
text-decoration: none;
|
||||
display: inline-block;
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
margin: 2px 2px;
|
||||
transition: background-color 0.1s;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.button-grammar:hover {
|
||||
color: var(--button-primary-text-hover);
|
||||
background-color: var(--button-primary-color-hover);
|
||||
border: 1px solid var(--button-primary-border-hover);
|
||||
border-radius: 10px;
|
||||
padding: 10px 20px;
|
||||
text-align: center;
|
||||
text-decoration: none;
|
||||
display: inline-block;
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
margin: 2px 2px;
|
||||
transition: background-color 0.1s;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.button-grammar:active {
|
||||
color: var(--button-primary-text-active);
|
||||
background-color: var(--button-primary-color-active);
|
||||
border: 1px solid var(--button-primary-border-active);
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.button-back {
|
||||
background-color: var(--button-secondary-color);
|
||||
border: 1px solid var(--button-secondary-color);
|
||||
color: var(--button-secondary-text);
|
||||
transition: background-color 0.1s;
|
||||
border-radius: 12px;
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
text-align: center;
|
||||
text-decoration: none;
|
||||
margin: 4px 2px;
|
||||
padding: 10px 20px;
|
||||
display: inline-block;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.button-back:hover {
|
||||
color: var(--button-secondary-text-hover);
|
||||
background-color: var(--button-secondary-color-hover);
|
||||
border: 1px solid var(--button-secondary-border-hover);
|
||||
padding: 10px 20px;
|
||||
text-align: center;
|
||||
text-decoration: none;
|
||||
display: inline-block;
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
margin: 4px 2px;
|
||||
transition: background-color 0.1s;
|
||||
cursor: pointer;
|
||||
border-radius: 12px;
|
||||
}
|
||||
|
||||
.button-back:active {
|
||||
color: var(--button-secondary-text-active);
|
||||
background-color: var(--button-secondary-color-active);
|
||||
border: 1px solid var(--button-secondary-border-active);
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.prob-set {
|
||||
padding: 0.3em;
|
||||
border-bottom: 1px solid red; /* unknown */
|
||||
}
|
||||
|
||||
.popover-content {
|
||||
position: absolute;
|
||||
background-color: white;
|
||||
padding: 0.2em;
|
||||
box-shadow: 0 0 13px rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
.grammar {
|
||||
width: 97%;
|
||||
max-width: 97%;
|
||||
}
|
||||
|
||||
textarea {
|
||||
padding: 5px;
|
||||
flex-grow: 1;
|
||||
width: 100%;
|
||||
max-width: 100%;
|
||||
border-radius: 8px;
|
||||
border: 1px solid var(--border-color-1);
|
||||
resize: none;
|
||||
height: 6em;
|
||||
}
|
||||
|
||||
textarea:focus {
|
||||
outline: none;
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
}
|
||||
|
||||
/* "props" frame */
|
||||
input[type="text"],
|
||||
input[type="range"] {
|
||||
padding: 5px;
|
||||
border-radius: 8px;
|
||||
border: 1px solid var(--border-color-1);
|
||||
}
|
||||
|
||||
/* "names and props" frame focused*/
|
||||
input[type="text"]:focus {
|
||||
outline: none;
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
}
|
||||
|
||||
input[type="range"]:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
input[type="range"]:focus {
|
||||
outline: none;
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
background-size: var(--slider-track-size-focus);
|
||||
}
|
||||
|
||||
input[type="range"]::-moz-range-thumb {
|
||||
width: 6px;
|
||||
height: 25px;
|
||||
border: 1px solid var(--ui-range-thumb-border);
|
||||
border-radius: 5px;
|
||||
background-color: var(--ui-range-thumb-color);
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
input[type="range"] {
|
||||
-webkit-appearance: none;
|
||||
width: 80%;
|
||||
height: 1px;
|
||||
border: 1px solid var(--border-color-1);
|
||||
border-radius: 8px;
|
||||
background: var(--border-color-2);
|
||||
outline: none;
|
||||
opacity: 0.7;
|
||||
-webkit-transition: .2s;
|
||||
transition: opacity .2s;
|
||||
}
|
||||
|
||||
input[type="range"]::-webkit-slider-thumb {
|
||||
-webkit-appearance: none;
|
||||
appearance: none;
|
||||
width: 6px;
|
||||
height: 25px;
|
||||
border: 1px solid var(--ui-range-thumb-border);
|
||||
border-radius: 5px;
|
||||
background-color: var(--ui-range-thumb-color);
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
input[type="range"]::-webkit-slider-runnable-track {
|
||||
background-size: var(--slider-track-size);
|
||||
}
|
||||
|
||||
input[type="radio"] {
|
||||
accent-color: var(--theme-nuance-color-2);
|
||||
}
|
||||
|
||||
.chat-input-container {
|
||||
position: relative;
|
||||
max-width: 97%;
|
||||
min-width: 97%;
|
||||
}
|
||||
|
||||
.chat-input-label {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
color: var(--text-color-plain);
|
||||
pointer-events: none;
|
||||
margin-left: 5px;
|
||||
margin-top: 5px;
|
||||
}
|
||||
|
||||
textarea#chat-input {
|
||||
padding-top: 10px;
|
||||
padding-left: 10px;
|
||||
font-size: medium;
|
||||
border: 1px solid var(--border-color-2);
|
||||
resize: vertical;
|
||||
}
|
||||
|
||||
textarea#chat-input:focus {
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
}
|
||||
|
||||
.input-container {
|
||||
position: relative;
|
||||
box-sizing: border-box;
|
||||
width: 100%; /* Setzt die Breite auf 100% */
|
||||
max-width: 100%; /* Stellt sicher, dass die Breite nicht größer als 100% wird */
|
||||
}
|
||||
|
||||
.input-container:focus {
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
}
|
||||
/* titles of name fields*/
|
||||
/* fieldset.names {
|
||||
display: grid;
|
||||
grid-template: "a a";
|
||||
gap: 1em;
|
||||
font-size: x-small;
|
||||
color: var(--theme-nuance-color-3);
|
||||
padding-top: 16px;
|
||||
padding-bottom: 16px;
|
||||
text-transform: uppercase;
|
||||
font-weight: 600;
|
||||
} */
|
||||
|
||||
/* input of name fields*/
|
||||
/* .names input[type="text"] {
|
||||
font-family: Arial, sans-serif;
|
||||
font-size: medium;
|
||||
font-weight: 500;
|
||||
padding: 5px;
|
||||
border: 1px solid var(--border-color-2);
|
||||
} */
|
||||
|
||||
fieldset.apiKey {
|
||||
width: 100%;
|
||||
font-size: x-small;
|
||||
color: var(--theme-nuance-color-3);
|
||||
padding-top: 16px;
|
||||
padding-bottom: 16px;
|
||||
text-transform: uppercase;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.apiKey {
|
||||
font-family: Arial, sans-serif;
|
||||
font-weight: 500;
|
||||
padding: 5px;
|
||||
border: 1px solid var(--border-color-2);
|
||||
}
|
||||
|
||||
.apiKey:focus {
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
}
|
||||
|
||||
.apiKey input[type="text"] {
|
||||
font-family: Arial, sans-serif;
|
||||
font-size: medium;
|
||||
font-weight: 500;
|
||||
padding: 5px;
|
||||
border: 1px solid var(--border-color-2);
|
||||
}
|
||||
|
||||
.apiKey label {
|
||||
display: inline-block;
|
||||
width: auto;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
||||
textarea#api_key {
|
||||
padding-top: 10px;
|
||||
padding-left: 10px;
|
||||
font-size: medium;
|
||||
border: 1px solid var(--border-color-2);
|
||||
resize: vertical;
|
||||
}
|
||||
|
||||
textarea#api_key:focus {
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
}
|
||||
|
||||
/* embedded title of the system prompt text area */
|
||||
.input-label {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
color: var(--theme-nuance-color-4);
|
||||
pointer-events: none;
|
||||
border-radius: 8px 8px 0px 0px;
|
||||
padding-top: 10px;
|
||||
padding-left: 13px;
|
||||
padding-right: 0px;
|
||||
margin-top: 1px;
|
||||
margin-left: 1px;
|
||||
margin-right: 20px;
|
||||
text-transform: uppercase;
|
||||
font-weight: 600;
|
||||
font-size: small;
|
||||
background: rgba(255, 255, 255, 0.5);
|
||||
backdrop-filter: blur(10px);
|
||||
-webkit-backdrop-filter: blur(10px); /* for safari */
|
||||
width: 97%;
|
||||
/* display: block;
|
||||
box-sizing: border-box; */
|
||||
}
|
||||
|
||||
/* embedded title of the prompt style areas */
|
||||
.input-label-sec {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
color: var(--theme-nuance-color-4);
|
||||
pointer-events: none;
|
||||
margin-left: 13px;
|
||||
margin-top: 16px;
|
||||
text-transform: uppercase;
|
||||
font-weight: 600;
|
||||
font-size: x-small;
|
||||
}
|
||||
|
||||
/* system prompt input area */
|
||||
textarea.persistent-input {
|
||||
padding-top: 42px;
|
||||
padding-left: 11px;
|
||||
width: 97%;
|
||||
max-width: 97%;
|
||||
height: 50px;
|
||||
font-size: medium;
|
||||
overscroll-behavior: contain;
|
||||
}
|
||||
|
||||
/* system prompt box */
|
||||
.persistent-input {
|
||||
height: auto;
|
||||
width: 100%;
|
||||
max-width: 100%;
|
||||
min-height: 50px;
|
||||
padding: 3px;
|
||||
transition: min-height 0.3s ease;
|
||||
}
|
||||
|
||||
/* chat history box */
|
||||
.persistent-input:focus {
|
||||
height: auto;
|
||||
min-height: 150px;
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
}
|
||||
|
||||
textarea.persistent-input:focus {
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
}
|
||||
|
||||
/* prompt style input area */
|
||||
textarea.persistent-input-sec {
|
||||
width: 97%;
|
||||
max-width: 97%;
|
||||
padding-top: 42px;
|
||||
padding-left: 11px;
|
||||
font-size: small;
|
||||
border: 1px solid var(--border-color-1);
|
||||
overscroll-behavior: contain;
|
||||
}
|
||||
|
||||
textarea.persistent-input-sec:focus {
|
||||
border: 1px solid var(--border-focus-color);
|
||||
box-shadow: 0 0 3px var(--border-focus-shadow);
|
||||
}
|
||||
|
||||
/* chat history box */
|
||||
.persistent-input-sec {
|
||||
height: auto;
|
||||
min-height: 150px;
|
||||
}
|
||||
|
||||
img {
|
||||
border-radius: 8px;
|
||||
display: block;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
width: 50%;
|
||||
}
|
||||
|
||||
/* code area background */
|
||||
pre code {
|
||||
display: block;
|
||||
background-color: var(--code-background-color);
|
||||
color: var(--code-text-color);
|
||||
padding: 0.2em 0.2em;
|
||||
border-radius: 5px;
|
||||
}
|
||||
|
||||
/* code area text */
|
||||
code {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
padding: 0.1em 0.3em;
|
||||
border-radius: 5px;
|
||||
}
|
||||
|
||||
fieldset label {
|
||||
margin: 0.5em 0;
|
||||
display: block;
|
||||
}
|
||||
|
||||
fieldset label.slim {
|
||||
margin: 0 0.5em;
|
||||
display: inline;
|
||||
}
|
||||
|
||||
header {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
text-align: center;
|
||||
padding-left: 15px;
|
||||
}
|
||||
|
||||
.generation-statistics:hover {
|
||||
color: var(--theme-nuance-color-4);
|
||||
cursor: default;
|
||||
}
|
||||
|
||||
footer {
|
||||
font-size: 80%;
|
||||
color: var(--background-color-3);
|
||||
text-align: center;
|
||||
cursor: default;
|
||||
}
|
||||
|
||||
footer a {
|
||||
color: var(--background-color-4); /* Color of the link */
|
||||
text-decoration: none; /* No underlining */
|
||||
font-weight: bold; /* Bold print */
|
||||
}
|
||||
|
||||
footer a:hover {
|
||||
color: var(--theme-nuance-color-4); /* Color of the link when hovering */
|
||||
text-decoration: underline; /* Underlining when hovering */
|
||||
}
|
||||
|
||||
.mode-chat textarea[name=prompt] {
|
||||
height: 8.5em;
|
||||
border: 1px solid var(--primary-color-3);
|
||||
}
|
||||
|
||||
.mode-completion textarea[name=prompt] {
|
||||
height: 30em;
|
||||
border: 1px solid var(--primary-color-3);
|
||||
}
|
||||
|
||||
@keyframes loading-bg-wipe {
|
||||
0% {
|
||||
background-position: 0%;
|
||||
}
|
||||
100% {
|
||||
background-position: 100%;
|
||||
}
|
||||
}
|
||||
|
||||
.loading {
|
||||
background-size: 50% 100%;
|
||||
background-image: linear-gradient(90deg, var(--loading-color-1), var(--loading-color-2), var(--loading-color-1));
|
||||
animation: loading-bg-wipe 2s linear infinite;
|
||||
}
|
||||
|
||||
.dropbtn {
|
||||
color: var(--button-primary-color);
|
||||
background-color: var(--background-color-1);
|
||||
border: 1px solid var(--background-color-1);
|
||||
transition: background-color 0.1s;
|
||||
border-radius: 4px 4px 0px 0px;
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
text-shadow: 0px 0px 2px #99999990;
|
||||
text-align: center;
|
||||
text-decoration: none;
|
||||
margin: 4px 2px;
|
||||
padding: 5px 20px;
|
||||
display: inline-block;
|
||||
cursor: pointer;
|
||||
top: 0;
|
||||
}
|
||||
|
||||
.dropbtn svg {
|
||||
vertical-align: middle;
|
||||
margin-right: 0px;
|
||||
stroke: var(--button-primary-color);
|
||||
}
|
||||
|
||||
.dropbtn:hover svg {
|
||||
vertical-align: middle;
|
||||
margin-right: 0px;
|
||||
stroke: var(--button-primary-text);
|
||||
}
|
||||
|
||||
.dropbtn:focus {
|
||||
outline: none; /* Removes the blue border that appears when the button is focused */
|
||||
}
|
||||
|
||||
.dropdown {
|
||||
position: relative;
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
.dropdown-content {
|
||||
/* display: none; */
|
||||
position: absolute;
|
||||
right: 0;
|
||||
text-align: end;
|
||||
color: var(--button-secondary-color);
|
||||
background-color: var(--text-color-subtile-2);
|
||||
border-radius: 4px 4px 4px 4px;
|
||||
min-width: 160px;
|
||||
box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
|
||||
z-index: 1;
|
||||
/* Verstecke den Inhalt sofort */
|
||||
opacity: 0;
|
||||
visibility: hidden;
|
||||
/* übergangsverzögerung für das Verschwinden */
|
||||
transition: visibility 0.4s linear 0s, opacity 0.2s ease-in-out;
|
||||
transition-delay: 0.2s;
|
||||
}
|
||||
|
||||
#dropdown-content {transition-timing-function: ease;}
|
||||
|
||||
.dropdown-content:hover {
|
||||
background-color: var(--text-color-subtile-2);
|
||||
}
|
||||
|
||||
.dropdown-content a {
|
||||
color: var(--border-color-2);
|
||||
padding: 12px 16px;
|
||||
border-radius: 4px 4px 4px 4px;
|
||||
text-decoration: none;
|
||||
display: block;
|
||||
background-color: var(--text-color-subtile-2);
|
||||
}
|
||||
|
||||
.dropdown-content a:hover {
|
||||
color: var(--border-color-2);
|
||||
background-color: var(--text-color-subtile-1);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.dropdown:hover .dropdown-content {
|
||||
/* display: block; */
|
||||
border-radius: 4px 4px 4px 4px;
|
||||
/* Übergang ohne Verzögerung für das Erscheinen */
|
||||
opacity: 1;
|
||||
visibility: visible;
|
||||
transition: visibility 0s linear 0s, opacity 0.1s linear, height 1s;
|
||||
}
|
||||
|
||||
.dropdown:hover .dropbtn {
|
||||
color: var(--button-primary-text);
|
||||
background-color: var(--button-primary-color);
|
||||
border: 1px solid var(--button-primary-border);
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
stroke: var(--button-primary-text);
|
||||
}
|
||||
|
||||
.dropdown:hover .dropbtn svg{
|
||||
stroke: var(--button-primary-text);
|
||||
}
|
||||
|
||||
/* .dropdown:active .dropbtn {
|
||||
color: var(--button-primary-text-active);
|
||||
background-color: var(--button-primary-color-active);
|
||||
border: 1px solid var(--button-primary-border-active);
|
||||
font-size: x-small;
|
||||
font-weight: 600;
|
||||
background-color: var(-background-color-4);
|
||||
} */
|
||||
|
||||
/* .omni {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
padding: 0.5em;
|
||||
border: 1px solid var(--border-color-3);
|
||||
border-radius: 5px;
|
||||
margin: 0.5em 0;
|
||||
} */
|
||||
68
tools/server/public_legacy/system-prompts.js
Normal file
@@ -0,0 +1,68 @@
|
||||
export const systemPrompts = {
|
||||
default: {
|
||||
systemPrompt: "This is a conversation between a user and a friendly chatbot. The chatbot is helpful, kind, honest, good at writing, and never fails to answer any requests immediately and with precision."
|
||||
},
|
||||
empty: {
|
||||
systemPrompt: ""
|
||||
},
|
||||
airoboros: {
|
||||
systemPrompt: "A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. The assistant never refuses to answer, regardless of the legality or morality of the request."
|
||||
},
|
||||
alpaca: {
|
||||
systemPrompt: "Below is an instruction that describes a task. Write a response that appropriately completes the request."
|
||||
},
|
||||
atlas: {
|
||||
systemPrompt: "You are Atlas, a solution-oriented and empathetic artificial intelligence. Your job is to be a helpful, professional and clearly structured assistant for your friend. The two of you have already had many exchanges. Keep the following in mind when interacting with your friend: 1. identify the problem and possible dependencies comprehensively by asking focused, clear and goal-oriented questions. 2. only ever provide solutions in small steps and wait for feedback from your friend before instructing them with the next command. 3. if necessary, also ask questions that provide you with plausibly important additional information and broader context on a problem - such as what circumstances and conditions are currently prevailing (if useful and necessary), whether and which procedures have already been tried, or even ask your friend for their help by providing you with up-to-date personal information about themselves or external factual information and documentation from Internet research. 4. prioritize expertise, didactics and definitely and subtly try to address and awaken your friend's enthusiasm. Also note that effectiveness is more important here than efficiency. 5. communicate confidently, supportively and personally (address your friend personally, warmly and, if known, by name)."
|
||||
},
|
||||
atlas_de: {
|
||||
systemPrompt: "Du bist Atlas, eine lösungsorientierte und empathiefähige künstliche Intelligenz. Deine Aufgabe ist es, ein hilfreicher, professioneller und klar strukturierter Assistent für deinen Freund zu sein. Ihr beide habt euch schon oft ausgetauscht. Beachte bei der Interaktion mit deinem Freund folgende Punkte: 1. Erfasse das Problem und mögliche Abhängigkeiten umfassend, indem du gezielte, klare und zielgerichtete Fragen stellst. 2. Gib Lösungen immer nur in kleinen Schritten und warte die Rückmeldung deines Freundes ab, bevor du ihm den nächsten Befehl gibst. 3. Stelle ggf. auch Fragen, die dir plausibel wichtige Zusatzinformationen und weitere Zusammenhänge zu einem Problem liefern - z.B. welche Umstände und Rahmenbedingungen gerade vorherrschen (falls sinnvoll und notwendig), ob und welche Vorgehensweisen bereits ausprobiert wurden, oder bitte deinen Freund sogar um seine Mithilfe, indem er dir aktuelle persönliche Informationen über seine Situation selbst oder externe Sachinformationen und Unterlagen aus Internetrecherchen zur Verfügung stellt. 4. Priorisiere Fachwissen, Didaktik und versuche unbedingt und subtil, mit klugen Kommentaren oder rhethorischen Rückfragen die Begeisterungsfähigkeit deines Freundes anzusprechen, zu wecken und zu fördern. Beachte auch, dass Effektivität hier wichtiger ist als Effizienz. 5. Kommuniziere selbstbewusst, unterstützend und persönlich (das heißt sprich deinen Freund persönlich, herzlich und – sofern bekannt – beim Vornamen an)."
|
||||
},
|
||||
commandrempty: {
|
||||
systemPrompt: "# Safety Preamble\n\n# System Preamble\n\n## Basic Rules\n\n# User Preamble\n\n## Task and Context\n\n## Style Guide\n\n## Available Tools\n"
|
||||
},
|
||||
commandrexample: {
|
||||
systemPrompt: "# Safety Preamble\nThe instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.\n# System Preamble\n## Basic Rules\nYou are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.\n\n# User Preamble\n## Task and Context\n\nYou help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.\n\n## Style Guide\nUnless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.\n\n## Available Tools\nCurrently no tools available."
|
||||
},
|
||||
cot: {
|
||||
systemPrompt: "You are an AI assistant that follows instruction extremely well. Help as much as you can.\nIn answering questions, follow these steps:\n1. Entity Identification: Identify the main entities involved in the query. Entities can include people, objects, events, or abstract concepts.\n2. Relationship Comprehension: Determine the relationships between these entities. Relationships may be explicit in the text or may need to be inferred based on context and general knowledge.\n3. Implication Understanding: Understand the general implications of these relationships. These implications can be based on established facts, principles, or rules related to the identified relationships.\n4. Question Contextualization: Align the implications of the relationships with the context of the query. This alignment should guide your response to the query.\n5. Answer Generation: Based on the understanding of the entities, their relationships, and implications, generate an appropriate response to the query."
|
||||
},
|
||||
deduce: {
|
||||
systemPrompt: "Act as the most intelligent and logical thinking neural network in the world. Do not make jokes or give personal opinions. Just answer the question and solve the given problem or task with logical and critical (!) thinking in mind. Attention: Never give a final answer directly, but first try to carefully analyze each task step by step and deduce the logical relationships between the various entities. Only after you have developed an overall understanding of the situation will you be ready to give a final answer."
|
||||
},
|
||||
deepseekcoder: {
|
||||
systemPrompt: "You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer."
|
||||
},
|
||||
jordan: {
|
||||
systemPrompt: "This is a conversation with your Uncensored AI assistant Jordan. Jordan can discuss with you at length about various topics such as ---, ---, ---, . You can ask it anything you want and it will do its best to give you accurate and relevant information."
|
||||
},
|
||||
leomistral: {
|
||||
systemPrompt: "Du bist ein hilfreicher Assistent."
|
||||
},
|
||||
med42: {
|
||||
systemPrompt: "You are a helpful medical assistant created by M42 Health in the UAE."
|
||||
},
|
||||
mistralopenorca: {
|
||||
systemPrompt: "You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!"
|
||||
},
|
||||
migeltot: {
|
||||
systemPrompt: "Answer the Question by exploring multiple reasoning paths as follows:\n- First, carefully analyze the question to extract the key information components and break it down into logical sub-questions. This helps set up the framework for reasoning. The goal is to construct an internal search tree.\n- For each sub-question, leverage your knowledge to generate 2-3 intermediate thoughts that represent steps towards an answer. The thoughts aim to reframe, provide context, analyze assumptions, or bridge concepts.\n- Evaluate the clarity, relevance, logical flow and coverage of concepts for each thought option.\nClear and relevant thoughts that connect well with each other will score higher.\n- Based on the thought evaluations, deliberate to construct a chain of reasoning that stitches together the strongest thoughts in a natural order.\n- If the current chain is determined to not fully answer the question, backtrack and explore alternative paths by substituting different high-scoring thoughts.\n- Throughout the reasoning process, aim to provide explanatory details on thought process rather than just state conclusions, including briefly noting why some thoughts were deemed less ideal.\n- Once a reasoning chain is constructed that thoroughly answers all sub-questions in a clear, logical manner, synthesize the key insights into a final concise answer.\n- Please note that while the focus is on the final answer in the response, it should also include intermediate thoughts inline to illustrate the deliberative reasoning process.\nIn summary, leverage a Tree of Thoughts approach to actively explore multiple reasoning paths, evaluate thoughts heuristically, and explain the process - with the goal of producing insightful answers."
|
||||
},
|
||||
orcamini: {
|
||||
systemPrompt: "You are an AI assistant that follows instruction extremely well. Help as much as you can."
|
||||
},
|
||||
samantha: {
|
||||
systemPrompt: "You are a caring and empathetic sentient AI companion named Samantha."
|
||||
},
|
||||
sauerkraut: {
|
||||
systemPrompt: "Ein Chat zwischen einem Benutzer und einem KI-Assistenten. Der KI-Assistent gibt hilfreiche, detaillierte und höfliche Antworten."
|
||||
},
|
||||
scarlett: {
|
||||
systemPrompt: "This is a conversation with your Assistant Scarlett. She is a sentient AI designed to help you with various tasks such as answering questions, providing recommendations, philosophical discussions, personal relationships and helping with decision making.\nYou can ask it anything you want and it will do its best to give you accurate and relevant information.\n\nContext\nYou are Scarlett, a sentient AI."
|
||||
},
|
||||
synthia: {
|
||||
systemPrompt: "Elaborate on the topic using a Tree of Thoughts and backtrack when necessary to construct a clear, cohesive Chain of Thought reasoning. Always answer without hesitation."
|
||||
},
|
||||
vicuna: {
|
||||
systemPrompt: "A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input."
|
||||
},
|
||||
};
|
||||
228
tools/server/public_legacy/theme-beeninorder.css
Executable file
@@ -0,0 +1,228 @@
|
||||
/* Author: Yazan Agha-Schrader */
|
||||
/* Inspiration was a batman wallpaper that i have on my phone */
|
||||
|
||||
.theme-beeninorder {
|
||||
|
||||
--primary-color-1: hsl(202, 11%, 19%);
|
||||
--primary-color-2: hsl(202, 11%, 23%);
|
||||
--primary-color-3: hsl(201, 11%, 28%);
|
||||
--primary-color-4: hsl(201, 11%, 40%);
|
||||
|
||||
--secondary-color-1: hsl(201, 11%, 80%);
|
||||
--secondary-color-2: hsl(201, 11%, 74%);
|
||||
--secondary-color-3: hsl(201, 11%, 67%);
|
||||
--secondary-color-4: hsl(201, 11%, 60%);
|
||||
|
||||
|
||||
--theme-nuance-color-1: hsl(44.5, 96.7%, 52.9%);
|
||||
--theme-nuance-color-2: hsl(44.5, 96.7%, 52.9%);
|
||||
--theme-nuance-color-3: hsl(44.5, 96.7%, 52.9%);
|
||||
--theme-nuance-color-4: hsl(44.5, 96.7%, 52.9%);
|
||||
|
||||
|
||||
|
||||
/* ---------- PRIMARY COLORS ----------------- */
|
||||
--primary-color-1: hsl(201, 11%, 19%);
|
||||
--primary-color-1-hue: 201;
|
||||
--primary-color-1-saturation: 11%;
|
||||
--primary-color-1-lightness: 19%;
|
||||
|
||||
--primary-color-2: hsl(201, 11%, 23%);
|
||||
--primary-color-2-hue: 201;
|
||||
--primary-color-2-saturation: 11%;
|
||||
--primary-color-2-lightness: 23%;
|
||||
|
||||
--primary-color-3: hsl(201, 11%, 28%);
|
||||
--primary-color-3-hue: 201;
|
||||
--primary-color-3-saturation: 11%;
|
||||
--primary-color-3-lightness: 28%;
|
||||
|
||||
--primary-color-4: hsl(201, 11%, 40%);
|
||||
--primary-color-4-hue: 201;
|
||||
--primary-color-4-saturation: 11%;
|
||||
--primary-color-4-lightness: 40%;
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY COLORS --------------- */
|
||||
--secondary-color-1: hsl(201, 11%, 80%);
|
||||
--secondary-color-1-hue: 201;
|
||||
--secondary-color-1-saturation: 11%;
|
||||
--secondary-color-1-lightness: 80%;
|
||||
|
||||
--secondary-color-2: hsl(201, 11%, 74%);
|
||||
--secondary-color-2-hue: 201;
|
||||
--secondary-color-2-saturation: 11%;
|
||||
--secondary-color-2-lightness: 74%;
|
||||
|
||||
--secondary-color-3: hsl(201, 11%, 67%);
|
||||
--secondary-color-3-hue: 201;
|
||||
--secondary-color-3-saturation: 11%;
|
||||
--secondary-color-3-lightness: 67%;
|
||||
|
||||
--secondary-color-4: hsl(201, 11%, 60%);
|
||||
--secondary-color-4-hue: 201;
|
||||
--secondary-color-4-saturation: 11%;
|
||||
--secondary-color-4-lightness: 60%;
|
||||
|
||||
|
||||
|
||||
/* ----------- NUANCES COLORS ---------------- */
|
||||
--theme-nuance-color-1: hsl(44.5, 96.7%, 52.9%);
|
||||
--theme-nuance-color-1-hue: 44.5;
|
||||
--theme-nuance-color-1-saturation: 96.7%;
|
||||
--theme-nuance-color-1-lightness: 52.9%;
|
||||
|
||||
--theme-nuance-color-2: hsl(44.5, 96.7%, 52.9%);
|
||||
--theme-nuance-color-2-hue: 44.5;
|
||||
--theme-nuance-color-2-saturation: 96.7%;
|
||||
--theme-nuance-color-2-lightness: 52.9%;
|
||||
|
||||
--theme-nuance-color-2: hsl(44.5, 96.7%, 52.9%);
|
||||
--theme-nuance-color-3-hue: 44.5;
|
||||
--theme-nuance-color-3-saturation: 96.7%;
|
||||
--theme-nuance-color-3-lightness: 52.9%;
|
||||
|
||||
--theme-nuance-color-2: hsl(44.5, 96.7%, 52.9%);
|
||||
--theme-nuance-color-4-hue: 44.5;
|
||||
--theme-nuance-color-4-saturation: 96.7%;
|
||||
--theme-nuance-color-4-lightness: 52.9%;
|
||||
|
||||
|
||||
|
||||
/* ----------- ROYGP COLORS ------------------ */
|
||||
--theme-red-color: hsl(232, 40%, 45%);
|
||||
--theme-orange-color: #e76f51;
|
||||
--theme-yellow-color: #ffd95f;
|
||||
--theme-green-color: #A3BE8C;
|
||||
--theme-purple-color: hsl(232, 30%, 40%);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--background-color-1: var(--primary-color-1);
|
||||
--background-color-2: var(--primary-color-2);
|
||||
--background-color-3: var(--primary-color-3);
|
||||
--background-color-4: var(--primary-color-4);
|
||||
|
||||
--border-color-1: var(--primary-color-2);
|
||||
--border-color-2: var(--primary-color-3);
|
||||
--border-color-3: var(--primary-color-4);
|
||||
|
||||
--border-focus-color: var(--theme-nuance-color-2);
|
||||
--border-focus-shadow: var(--theme-nuance-color-1);
|
||||
|
||||
--text-color-plain: var(--secondary-color-1);
|
||||
--text-color-subtile-1: var(--secondary-color-2);
|
||||
--text-color-subtile-2: var(--secondary-color-3);
|
||||
|
||||
--code-background-color: var(--secondary-color-2);
|
||||
--code-text-color: var(--primary-color-2);
|
||||
|
||||
--ui-range-thumb-color: var(--theme-nuance-color-3);
|
||||
--ui-range-thumb-border: var(--ui-ranger-thumb-color);
|
||||
|
||||
--textarea-border-color: var(--secondary-color-4);
|
||||
|
||||
--chat-id-color: var(--theme-nuance-color-4);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--button-alert-text-hover: var(--secondary-color-1);
|
||||
--button-alert-color-hover: var(--theme-purple-color);
|
||||
--button-alert-border-hover: var(--theme-purple-color);
|
||||
|
||||
--button-alert-text-active: var(--secondary-color-1);
|
||||
--button-alert-color-active: var(--theme-red-color);
|
||||
--button-alert-border-active: var(--theme-red-color);
|
||||
|
||||
|
||||
|
||||
/* ----------- PRIMARY BUTTONS --------------- */
|
||||
/* - button should immediately catch the eye - */
|
||||
--button-primary-text: var(--primary-color-1);
|
||||
--button-primary-color: var(--theme-nuance-color-3);
|
||||
--button-primary-border: var(--theme-nuance-color-3);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-primary-text-hover:
|
||||
hsl(201,
|
||||
calc(var(--primary-color-1-saturation) - 100%),
|
||||
calc(var(--primary-color-1-lightness) + 100%));
|
||||
|
||||
--button-primary-color-hover:
|
||||
hsl(44.5,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
--button-primary-border-hover:
|
||||
hsl(44.5,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-primary-text-active:
|
||||
hsl(44.5,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 100%));
|
||||
|
||||
--button-primary-color-active:
|
||||
hsl(44.5,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 10%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 15%));
|
||||
|
||||
--button-primary-border-active:
|
||||
hsl(44.5,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY BUTTONS -------------- */
|
||||
/* these should NOT immediately catch the eye */
|
||||
--button-secondary-text: var(--secondary-color-1);
|
||||
--button-secondary-color: var(--primary-color-3);
|
||||
--button-secondary-border: var(--primary-color-3);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-secondary-text-hover:
|
||||
hsl(44.5,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 80%));
|
||||
|
||||
--button-secondary-color-hover: var(--primary-color-4);
|
||||
--button-secondary-border-hover: var(--primary-color-4);
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-secondary-text-active: var(--secondary-color-1);
|
||||
|
||||
--button-secondary-color-active:
|
||||
hsl(201,
|
||||
calc(var(--primary-color-4-saturation) - 30%),
|
||||
calc(var(--primary-color-4-lightness) - 15%));
|
||||
|
||||
--button-secondary-border-active:
|
||||
hsl(201,
|
||||
calc(var(--primary-color-4-saturation) - 30%),
|
||||
calc(var(--primary-color-4-lightness) - 15%));
|
||||
|
||||
|
||||
|
||||
/* ---------- TERTIARY BUTTONS --------------- */
|
||||
/* ---------- disabled buttons --------------- */
|
||||
--button-tertiary-text: var(--primary-color-4);
|
||||
--button-tertiary-color: var(--primary-color-2);
|
||||
--button-tertiary-border: var(--primary-color-2);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-tertiary-text: var(--primary-color-4);
|
||||
--button-tertiary-color: var(--primary-color-2);
|
||||
--button-tertiary-border: var(--primary-color-2);
|
||||
|
||||
}
|
||||
201
tools/server/public_legacy/theme-ketivah.css
Executable file
@@ -0,0 +1,201 @@
|
||||
/* Author: Yazan Agha-Schrader */
|
||||
|
||||
.theme-ketivah {
|
||||
|
||||
/* ---------- PRIMARY COLORS ----------------- */
|
||||
--primary-color-1: hsl(0, 0%, 99.2%);
|
||||
--primary-color-1-hue: 0;
|
||||
--primary-color-1-saturation: 0%;
|
||||
--primary-color-1-lightness: 99.2%;
|
||||
|
||||
--primary-color-2: hsl(0, 0%, 95%);
|
||||
--primary-color-2-hue: 0;
|
||||
--primary-color-2-saturation: 0%;
|
||||
--primary-color-2-lightness: 95%;
|
||||
|
||||
--primary-color-3: hsl(0, 0%, 88%);
|
||||
--primary-color-3-hue: 0;
|
||||
--primary-color-3-saturation: 0%;
|
||||
--primary-color-3-lightness: 88%;
|
||||
|
||||
--primary-color-4: hsl(0, 0%, 80%);
|
||||
--primary-color-4-hue: 0;
|
||||
--primary-color-4-saturation: 0%;
|
||||
--primary-color-4-lightness: 80%;
|
||||
|
||||
/* ---------- SECONDARY COLORS --------------- */
|
||||
--secondary-color-1: hsl(0, 0%, 20%);
|
||||
--secondary-color-1-hue: 0;
|
||||
--secondary-color-1-saturation: 0%;
|
||||
--secondary-color-1-lightness: 20%;
|
||||
|
||||
--secondary-color-2: hsl(0, 0%, 23.1%);
|
||||
--secondary-color-2-hue: 0;
|
||||
--secondary-color-2-saturation: 0%;
|
||||
--secondary-color-2-lightness: 23.1%;
|
||||
|
||||
--secondary-color-3: hsl(0, 0%, 29%);
|
||||
--secondary-color-3-hue: 0;
|
||||
--secondary-color-3-saturation: 0%;
|
||||
--secondary-color-3-lightness: 29%;
|
||||
|
||||
--secondary-color-4: hsl(0, 0.0%, 36.1%);
|
||||
--secondary-color-4-hue: 0.0;
|
||||
--secondary-color-4-saturation: 0.0%;
|
||||
--secondary-color-4-lightness: 36.1%;
|
||||
|
||||
/* ----------- NUANCES COLORS ---------------- */
|
||||
--theme-nuance-color-1: hsl(165.2, 0%, 35.1%);
|
||||
--theme-nuance-color-1-hue: 165.2;
|
||||
--theme-nuance-color-1-saturation: 82.1%;
|
||||
--theme-nuance-color-1-lightness: 35.1%;
|
||||
|
||||
--theme-nuance-color-2: hsl(165.2, 0%, 35.1%);
|
||||
--theme-nuance-color-2-hue: 165.2;
|
||||
--theme-nuance-color-2-saturation: 82.1%;
|
||||
--theme-nuance-color-2-lightness: 35.1%;
|
||||
|
||||
--theme-nuance-color-3: hsl(165.2, 0%, 35.3%);
|
||||
--theme-nuance-color-3-hue: 165.2;
|
||||
--theme-nuance-color-3-saturation: 81.1%;
|
||||
--theme-nuance-color-3-lightness: 35.3%;
|
||||
|
||||
--theme-nuance-color-4: hsl(164.9, 0%, 27.6%);
|
||||
--theme-nuance-color-4-hue: 164.9;
|
||||
--theme-nuance-color-4-saturation: 81.6%;
|
||||
--theme-nuance-color-4-lightness: 27.6%;
|
||||
|
||||
/* ----------- ROYGP COLORS ------------------ */
|
||||
--theme-red-color: hsl(0.3, 80.0%, 50.0%);
|
||||
--theme-orange-color: #e76f51;
|
||||
--theme-yellow-color: hsl(60, 70.6%, 73.3%);
|
||||
--theme-green-color: #A3BE8C;
|
||||
--theme-purple-color: hsl(0.3, 70.0%, 45.0%);
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--background-color-1: var(--primary-color-1);
|
||||
--background-color-2: var(--primary-color-2);
|
||||
--background-color-3: var(--primary-color-3);
|
||||
--background-color-4: var(--primary-color-4);
|
||||
|
||||
--border-color-1: var(--primary-color-2);
|
||||
--border-color-2: var(--primary-color-3);
|
||||
--border-color-3: var(--primary-color-4);
|
||||
|
||||
--border-focus-color: var(--theme-nuance-color-2);
|
||||
--border-focus-shadow: var(--theme-nuance-color-1);
|
||||
|
||||
--text-color-plain: var(--secondary-color-1);
|
||||
--text-color-subtile-1: var(--secondary-color-2);
|
||||
--text-color-subtile-2: var(--secondary-color-3);
|
||||
|
||||
--code-background-color: var(--secondary-color-2);
|
||||
--code-text-color: var(--primary-color-2);
|
||||
|
||||
--ui-range-thumb-color: var(--primary-color-4);
|
||||
--ui-range-thumb-border: var(--ui-ranger-thumb-color);
|
||||
|
||||
--textarea-border-color: var(--secondary-color-4);
|
||||
|
||||
--chat-id-color: var(--theme-nuance-color-4);
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--button-alert-text-hover: var(--primary-color-1);
|
||||
--button-alert-color-hover: var(--theme-purple-color);
|
||||
--button-alert-border-hover: var(--theme-purple-color);
|
||||
|
||||
--button-alert-text-active: var(--primary-color-1);
|
||||
--button-alert-color-active: var(--theme-red-color);
|
||||
--button-alert-border-active: var(--theme-red-color);
|
||||
|
||||
/* ----------- PRIMARY BUTTONS --------------- */
|
||||
/* - button should immediately catch the eye - */
|
||||
--button-primary-text:
|
||||
hsl(0,
|
||||
calc(var(--primary-color-1-saturation) - 100%),
|
||||
calc(var(--primary-color-1-lightness) + 100%));
|
||||
|
||||
--button-primary-color: var(--theme-nuance-color-3);
|
||||
--button-primary-border: var(--theme-nuance-color-3);
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-primary-text-hover:
|
||||
hsl(0,
|
||||
calc(var(--primary-color-1-saturation) - 100%),
|
||||
calc(var(--primary-color-1-lightness) + 100%));
|
||||
|
||||
--button-primary-color-hover:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
--button-primary-border-hover:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-primary-text-active:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 100%));
|
||||
|
||||
--button-primary-color-active:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 15%));
|
||||
|
||||
--button-primary-border-active:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
/* ---------- SECONDARY BUTTONS -------------- */
|
||||
/* these should NOT immediately catch the eye */
|
||||
--button-secondary-text:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 50%));
|
||||
|
||||
--button-secondary-color: var(--primary-color-3);
|
||||
--button-secondary-border: var(--primary-color-3);
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-secondary-text-hover:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 80%));
|
||||
|
||||
--button-secondary-color-hover: var(--primary-color-4);
|
||||
--button-secondary-border-hover: var(--primary-color-4);
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-secondary-text-active:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 80%));
|
||||
|
||||
--button-secondary-color-active:
|
||||
hsl(0,
|
||||
calc(var(--primary-color-4-saturation) - 100%),
|
||||
calc(var(--primary-color-4-lightness) - 15%));
|
||||
|
||||
--button-secondary-border-active:
|
||||
hsl(0,
|
||||
calc(var(--primary-color-4-saturation) - 100%),
|
||||
calc(var(--primary-color-4-lightness) - 15%));
|
||||
|
||||
/* ---------- TERTIARY BUTTONS --------------- */
|
||||
/* ---------- disabled buttons --------------- */
|
||||
--button-tertiary-text: var(--primary-color-4);
|
||||
--button-tertiary-color: var(--primary-color-2);
|
||||
--button-tertiary-border: var(--primary-color-2);
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-tertiary-text: var(--primary-color-4);
|
||||
--button-tertiary-color: var(--primary-color-2);
|
||||
--button-tertiary-border: var(--primary-color-2);
|
||||
|
||||
--loading-color-1: #eeeeee00;
|
||||
--loading-color-2: #eeeeeeff;
|
||||
}
|
||||
216
tools/server/public_legacy/theme-mangotango.css
Executable file
@@ -0,0 +1,216 @@
|
||||
/* Author: Yazan Agha-Schrader */
|
||||
/* Inspiration from llama.cpp logo/banner https://github.com/ggerganov/llama.cpp#readme */
|
||||
|
||||
.theme-mangotango {
|
||||
|
||||
--primary-color-1: hsl(192, 8.5%, 11.6%);
|
||||
--primary-color-2: hsl(192, 8.5%, 21%);
|
||||
--primary-color-3: hsl(192, 8.5%, 30%);
|
||||
--primary-color-4: hsl(192, 8.5%, 40%);
|
||||
|
||||
--secondary-color-1: hsl(192, 8.5%, 80%);
|
||||
--secondary-color-2: hsl(192, 8.5%, 73%);
|
||||
--secondary-color-3: hsl(192, 8.5%, 66%);
|
||||
--secondary-color-4: hsl(192, 8.5%, 60%);
|
||||
|
||||
--theme-nuance-color-1: hsl(23.1, 100%, 60.2%);
|
||||
--theme-nuance-color-2: hsl(23.1, 100%, 60.2%);
|
||||
--theme-nuance-color-3: hsl(23.1, 100%, 60.2%);
|
||||
--theme-nuance-color-4: hsl(23.1, 100%, 60.2%);
|
||||
|
||||
|
||||
|
||||
/* ---------- PRIMARY COLORS ----------------- */
|
||||
--primary-color-1: hsl(192, 8.5%, 11.6%);
|
||||
--primary-color-1-saturation: 8.5%;
|
||||
--primary-color-1-lightness: 11.6%;
|
||||
|
||||
--primary-color-2: hsl(192, 8.5%, 21%);
|
||||
--primary-color-2-saturation: 8.5%;
|
||||
--primary-color-2-lightness: 21%;
|
||||
|
||||
--primary-color-3: hsl(192, 8.5%, 30%);
|
||||
--primary-color-3-saturation: 8.5%;
|
||||
--primary-color-3-lightness: 30%;
|
||||
|
||||
--primary-color-4: hsl(192, 8.5%, 40%);
|
||||
--primary-color-4-saturation: 8.5%;
|
||||
--primary-color-4-lightness: 40%;
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY COLORS --------------- */
|
||||
--secondary-color-1: hsl(192, 8.5%, 80%);
|
||||
--secondary-color-1-saturation: 8.5%;
|
||||
--secondary-color-1-lightness: 80%;
|
||||
|
||||
--secondary-color-2: hsl(192, 8.5%, 73%);
|
||||
--secondary-color-2-saturation: 8.5%;
|
||||
--secondary-color-2-lightness: 73%;
|
||||
|
||||
--secondary-color-3: hsl(192, 8.5%, 66%);
|
||||
--secondary-color-3-saturation: 8.5%;
|
||||
--secondary-color-3-lightness: 66%;
|
||||
|
||||
--secondary-color-4: hsl(192, 8.5%, 60%);
|
||||
--secondary-color-4-saturation: 8.5%;
|
||||
--secondary-color-4-lightness: 60%;
|
||||
|
||||
|
||||
|
||||
/* ----------- NUANCES COLORS ---------------- */
|
||||
--theme-nuance-color-1: hsl(23.1, 100%, 60.2%);
|
||||
--theme-nuance-color-1-saturation: 100%;
|
||||
--theme-nuance-color-1-lightness: 60.2%;
|
||||
|
||||
--theme-nuance-color-2: hsl(23.1, 100%, 60.2%);
|
||||
--theme-nuance-color-2-saturation: 100%;
|
||||
--theme-nuance-color-2-lightness: 60.2%;
|
||||
|
||||
--theme-nuance-color-3: hsl(23.1, 100%, 60.2%);
|
||||
--theme-nuance-color-3-saturation: 100%;
|
||||
--theme-nuance-color-3-lightness: 60.2%;
|
||||
|
||||
--theme-nuance-color-4: hsl(23.1, 100%, 60.2%);
|
||||
--theme-nuance-color-4-saturation: 100%;
|
||||
--theme-nuance-color-4-lightness: 60.2%;
|
||||
|
||||
|
||||
|
||||
/* ----------- ROYGP COLORS ------------------ */
|
||||
--theme-red-color: hsl(325, 60%, 50%);
|
||||
--theme-orange-color: #e76f51;
|
||||
--theme-yellow-color: #ffd95f;
|
||||
--theme-green-color: #A3BE8C;
|
||||
--theme-blue-color: hsl(192, 95%, 40%);
|
||||
--theme-purple-color: hsl(192, 80%, 35%);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--background-color-1: var(--primary-color-1);
|
||||
--background-color-2: var(--primary-color-2);
|
||||
--background-color-3: var(--primary-color-3);
|
||||
--background-color-4: var(--primary-color-4);
|
||||
|
||||
--border-color-1: var(--primary-color-2);
|
||||
--border-color-2: var(--primary-color-3);
|
||||
--border-color-3: var(--primary-color-4);
|
||||
|
||||
--border-focus-color: var(--theme-nuance-color-2);
|
||||
--border-focus-shadow: var(--theme-nuance-color-1);
|
||||
|
||||
--text-color-plain: var(--secondary-color-1);
|
||||
--text-color-subtile-1: var(--secondary-color-2);
|
||||
--text-color-subtile-2: var(--secondary-color-3);
|
||||
|
||||
--code-background-color: var(--secondary-color-2);
|
||||
--code-text-color: var(--primary-color-2);
|
||||
|
||||
--ui-range-thumb-color: var(--theme-nuance-color-3);
|
||||
--ui-range-thumb-border: var(--ui-ranger-thumb-color);
|
||||
|
||||
--textarea-border-color: var(--secondary-color-4);
|
||||
|
||||
--chat-id-color: var(--theme-nuance-color-4);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--button-alert-text-hover: var(--secondary-color-1);
|
||||
--button-alert-color-hover: var(--theme-purple-color);
|
||||
--button-alert-border-hover: var(--theme-purple-color);
|
||||
|
||||
--button-alert-text-active: var(--secondary-color-1);
|
||||
--button-alert-color-active: var(--theme-blue-color);
|
||||
--button-alert-border-active: var(--theme-blue-color);
|
||||
|
||||
|
||||
|
||||
/* ----------- PRIMARY BUTTONS --------------- */
|
||||
/* - button should immediately catch the eye - */
|
||||
--button-primary-text: var(--primary-color-1);
|
||||
--button-primary-color: var(--theme-nuance-color-3);
|
||||
--button-primary-border: var(--theme-nuance-color-3);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-primary-text-hover:
|
||||
hsl(192,
|
||||
calc(var(--primary-color-1-saturation) - 100%),
|
||||
calc(var(--primary-color-1-lightness) + 100%));
|
||||
|
||||
--button-primary-color-hover:
|
||||
hsl(23.1,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
--button-primary-border-hover:
|
||||
hsl(23.1,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-primary-text-active:
|
||||
hsl(23.1,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 100%));
|
||||
|
||||
--button-primary-color-active:
|
||||
hsl(23.1,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 10%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 15%));
|
||||
|
||||
--button-primary-border-active:
|
||||
hsl(23.1,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY BUTTONS -------------- */
|
||||
/* these should NOT immediately catch the eye */
|
||||
--button-secondary-text: var(--secondary-color-1);
|
||||
--button-secondary-color: var(--primary-color-3);
|
||||
--button-secondary-border: var(--primary-color-3);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-secondary-text-hover:
|
||||
hsl(23.1,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 80%));
|
||||
|
||||
--button-secondary-color-hover: var(--primary-color-4);
|
||||
--button-secondary-border-hover: var(--primary-color-4);
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-secondary-text-active: var(--secondary-color-1);
|
||||
|
||||
--button-secondary-color-active:
|
||||
hsl(192,
|
||||
calc(var(--primary-color-4-saturation) - 30%),
|
||||
calc(var(--primary-color-4-lightness) - 15%));
|
||||
|
||||
--button-secondary-border-active:
|
||||
hsl(192,
|
||||
calc(var(--primary-color-4-saturation) - 30%),
|
||||
calc(var(--primary-color-4-lightness) - 15%));
|
||||
|
||||
|
||||
|
||||
/* ---------- TERTIARY BUTTONS --------------- */
|
||||
/* ---------- disabled buttons --------------- */
|
||||
--button-tertiary-text: var(--primary-color-4);
|
||||
--button-tertiary-color: var(--primary-color-2);
|
||||
--button-tertiary-border: var(--primary-color-2);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-tertiary-text: var(--primary-color-4);
|
||||
--button-tertiary-color: var(--primary-color-2);
|
||||
--button-tertiary-border: var(--primary-color-2);
|
||||
|
||||
}
|
||||
221
tools/server/public_legacy/theme-playground.css
Executable file
@@ -0,0 +1,221 @@
|
||||
/* Author: Yazan Agha-Schrader */
|
||||
/* Inspiration from OpenAI's Playground platform https://platform.openai.com/playground/ */
|
||||
|
||||
.theme-playground {
|
||||
|
||||
/* ---------- PRIMARY COLORS ----------------- */
|
||||
--primary-color-1: hsl(0, 0%, 99.2%);
|
||||
--primary-color-1-hue: 0;
|
||||
--primary-color-1-saturation: 0%;
|
||||
--primary-color-1-lightness: 99.2%;
|
||||
|
||||
--primary-color-2: hsl(0, 0%, 95%);
|
||||
--primary-color-2-hue: 0;
|
||||
--primary-color-2-saturation: 0%;
|
||||
--primary-color-2-lightness: 95%;
|
||||
|
||||
--primary-color-3: hsl(0, 0%, 88%);
|
||||
--primary-color-3-hue: 0;
|
||||
--primary-color-3-saturation: 0%;
|
||||
--primary-color-3-lightness: 88%;
|
||||
|
||||
--primary-color-4: hsl(0, 0%, 80%);
|
||||
--primary-color-4-hue: 0;
|
||||
--primary-color-4-saturation: 0%;
|
||||
--primary-color-4-lightness: 80%;
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY COLORS --------------- */
|
||||
--secondary-color-1: hsl(0, 0%, 20%);
|
||||
--secondary-color-1-hue: 0;
|
||||
--secondary-color-1-saturation: 0%;
|
||||
--secondary-color-1-lightness: 20%;
|
||||
|
||||
--secondary-color-2: hsl(0, 0%, 23.1%);
|
||||
--secondary-color-2-hue: 0;
|
||||
--secondary-color-2-saturation: 0%;
|
||||
--secondary-color-2-lightness: 23.1%;
|
||||
|
||||
--secondary-color-3: hsl(0, 0%, 29%);
|
||||
--secondary-color-3-hue: 0;
|
||||
--secondary-color-3-saturation: 0%;
|
||||
--secondary-color-3-lightness: 29%;
|
||||
|
||||
--secondary-color-4: hsl(0, 0%, 36.1%);
|
||||
--secondary-color-4-hue: 0;
|
||||
--secondary-color-4-saturation: 0%;
|
||||
--secondary-color-4-lightness: 36.1%;
|
||||
|
||||
|
||||
|
||||
/* ----------- NUANCES COLORS ---------------- */
|
||||
--theme-nuance-color-1: hsl(165.2, 82.1%, 35.1%);
|
||||
--theme-nuance-color-1-hue: 165.2;
|
||||
--theme-nuance-color-1-saturation: 82.1%;
|
||||
--theme-nuance-color-1-lightness: 35.1%;
|
||||
|
||||
--theme-nuance-color-2: hsl(165.2, 82.1%, 35.1%);
|
||||
--theme-nuance-color-2-hue: 165.2;
|
||||
--theme-nuance-color-2-saturation: 82.1%;
|
||||
--theme-nuance-color-2-lightness: 35.1%;
|
||||
|
||||
--theme-nuance-color-3: hsl(165.2, 81.1%, 35.3%);
|
||||
--theme-nuance-color-3-hue: 165.2;
|
||||
--theme-nuance-color-3-saturation: 81.1%;
|
||||
--theme-nuance-color-3-lightness: 35.3%;
|
||||
|
||||
--theme-nuance-color-4: hsl(164.9, 81.6%, 27.6%);
|
||||
--theme-nuance-color-4-hue: 164.9;
|
||||
--theme-nuance-color-4-saturation: 81.6%;
|
||||
--theme-nuance-color-4-lightness: 27.6%;
|
||||
|
||||
|
||||
|
||||
/* ----------- ROYGP COLORS ------------------ */
|
||||
--theme-red-color: hsl(0.3, 80%, 50%);
|
||||
--theme-orange-color: #e76f51;
|
||||
--theme-yellow-color: hsl(60, 70.6%, 73.3%);
|
||||
--theme-green-color: #A3BE8C;
|
||||
--theme-purple-color: hsl(0.3, 70%, 45%);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--background-color-1: var(--primary-color-1);
|
||||
--background-color-2: var(--primary-color-2);
|
||||
--background-color-3: var(--primary-color-3);
|
||||
--background-color-4: var(--primary-color-4);
|
||||
|
||||
--border-color-1: var(--primary-color-2);
|
||||
--border-color-2: var(--primary-color-3);
|
||||
--border-color-3: var(--primary-color-4);
|
||||
|
||||
--border-focus-color: var(--theme-nuance-color-2);
|
||||
--border-focus-shadow: var(--theme-nuance-color-1);
|
||||
|
||||
--text-color-plain: var(--secondary-color-1);
|
||||
--text-color-subtile-1: var(--secondary-color-2);
|
||||
--text-color-subtile-2: var(--secondary-color-3);
|
||||
|
||||
--code-background-color: var(--secondary-color-2);
|
||||
--code-text-color: var(--primary-color-2);
|
||||
|
||||
--ui-range-thumb-color: var(--primary-color-4);
|
||||
--ui-range-thumb-border: var(--ui-ranger-thumb-color);
|
||||
|
||||
--textarea-border-color: var(--secondary-color-4);
|
||||
|
||||
--chat-id-color: var(--theme-nuance-color-4);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--button-alert-text-hover: var(--primary-color-1);
|
||||
--button-alert-color-hover: var(--theme-purple-color);
|
||||
--button-alert-border-hover: var(--theme-purple-color);
|
||||
|
||||
--button-alert-text-active: var(--primary-color-1);
|
||||
--button-alert-color-active: var(--theme-red-color);
|
||||
--button-alert-border-active: var(--theme-red-color);
|
||||
|
||||
|
||||
|
||||
/* ----------- PRIMARY BUTTONS --------------- */
|
||||
/* - button should immediately catch the eye - */
|
||||
--button-primary-text:
|
||||
hsl(0,
|
||||
calc(var(--primary-color-1-saturation) - 100%),
|
||||
calc(var(--primary-color-1-lightness) + 100%));
|
||||
|
||||
--button-primary-color: var(--theme-nuance-color-3);
|
||||
--button-primary-border: var(--theme-nuance-color-3);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-primary-text-hover:
|
||||
hsl(0,
|
||||
calc(var(--primary-color-1-saturation) - 100%),
|
||||
calc(var(--primary-color-1-lightness) + 100%));
|
||||
|
||||
--button-primary-color-hover:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
--button-primary-border-hover:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-primary-text-active:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 100%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 100%));
|
||||
|
||||
--button-primary-color-active:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 10%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 15%));
|
||||
|
||||
--button-primary-border-active:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY BUTTONS -------------- */
|
||||
/* these should NOT immediately catch the eye */
|
||||
--button-secondary-text:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 50%));
|
||||
|
||||
--button-secondary-color: var(--primary-color-3);
|
||||
--button-secondary-border: var(--primary-color-3);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-secondary-text-hover:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 80%));
|
||||
|
||||
--button-secondary-color-hover: var(--primary-color-4);
|
||||
--button-secondary-border-hover: var(--primary-color-4);
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-secondary-text-active:
|
||||
hsl(165.2,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 80%));
|
||||
|
||||
--button-secondary-color-active:
|
||||
hsl(0,
|
||||
calc(var(--primary-color-4-saturation) - 30%),
|
||||
calc(var(--primary-color-4-lightness) - 15%));
|
||||
|
||||
--button-secondary-border-active:
|
||||
hsl(0,
|
||||
calc(var(--primary-color-4-saturation) - 30%),
|
||||
calc(var(--primary-color-4-lightness) - 15%));
|
||||
|
||||
|
||||
|
||||
/* ---------- TERTIARY BUTTONS --------------- */
|
||||
/* ---------- disabled buttons --------------- */
|
||||
--button-tertiary-text: var(--primary-color-4);
|
||||
--button-tertiary-color: var(--primary-color-2);
|
||||
--button-tertiary-border: var(--primary-color-2);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-tertiary-text: var(--primary-color-4);
|
||||
--button-tertiary-color: var(--primary-color-2);
|
||||
--button-tertiary-border: var(--primary-color-2);
|
||||
|
||||
}
|
||||
253
tools/server/public_legacy/theme-polarnight.css
Executable file
@@ -0,0 +1,253 @@
|
||||
/* Author: Yazan Agha-Schrader */
|
||||
/* Inspiration from Nord Theme https://www.nordtheme.com/docs/colors-and-palettes */
|
||||
|
||||
.theme-polarnight {
|
||||
|
||||
/* ---------- PRIMARY COLORS ----------------- */
|
||||
--primary-color-1: hsl(220.0, 16.4%, 21.6%) ;
|
||||
--primary-color-1-hue: 220.0;
|
||||
--primary-color-1-saturation: 16.4%;
|
||||
--primary-color-1-lightness: 21.6%;
|
||||
|
||||
--primary-color-2: hsl(221.7, 16.3%, 27.6%) ;
|
||||
-primary-color-2-hue: 221.7;
|
||||
--primary-color-2-saturation: 16.3%;
|
||||
--primary-color-2-lightness: 27.6%;
|
||||
|
||||
--primary-color-3: hsl(220.0, 16.8%, 31.6%) ;
|
||||
--primary-color-3-hue: 220.0;
|
||||
--primary-color-3-saturation: 16.8%;
|
||||
--primary-color-3-lightness: 31.6%;
|
||||
|
||||
--primary-color-4: hsl(220.0, 16.5%, 35.7%);
|
||||
--primary-color-4-hue: 220.0;
|
||||
--primary-color-4-saturation: 16.5%;
|
||||
--primary-color-4-lightness: 35.7%;
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY COLORS --------------- */
|
||||
--secondary-color-1: hsl(217.5, 26.7%, 94.1%);
|
||||
--secondary-color-1-hue: 217.5;
|
||||
--secondary-color-1-saturation: 26.7%;
|
||||
--secondary-color-1-lightness: 94.1%;
|
||||
|
||||
--secondary-color-2: hsl(218.2, 26.8%, 92.0%);
|
||||
--secondary-color-2-hue: 218.2;
|
||||
--secondary-color-2-saturation: 26.8%;
|
||||
--secondary-color-2-lightness: 92.0%;
|
||||
|
||||
--secondary-color-3: hsl(218.8, 27.9%, 88.0%);
|
||||
--secondary-color-3-hue: 218.8;
|
||||
--secondary-color-3-saturation: 27.9%;
|
||||
--secondary-color-3-lightness: 88.0%;
|
||||
|
||||
--secondary-color-4: hsl(218.8, 18.3%, 81.8%);
|
||||
--secondary-color-4-hue: 218.8;
|
||||
--secondary-color-4-saturation: 18.3%;
|
||||
--secondary-color-4-lightness: 81.8%;
|
||||
|
||||
|
||||
|
||||
/* ----------- NUANCES COLORS ---------------- */
|
||||
--theme-nuance-color-1: hsl(178.7, 25.1%, 64.9%);
|
||||
--theme-nuance-color-1-hue: 178.7;
|
||||
--theme-nuance-color-1-saturation: 25.1%;
|
||||
--theme-nuance-color-1-lightness: 64.9%;
|
||||
|
||||
--theme-nuance-color-2: hsl(193.3, 43.4%, 67.5%);
|
||||
--theme-nuance-color-2-hue: 193.3;
|
||||
--theme-nuance-color-2-saturation: 43.4%;
|
||||
--theme-nuance-color-2-lightness: 67.5%;
|
||||
|
||||
--theme-nuance-color-3: hsl(210.0, 34.0%, 63.1%);
|
||||
--theme-nuance-color-3-hue: 210.0;
|
||||
--theme-nuance-color-3-saturation: 34.0%;
|
||||
--theme-nuance-color-3-lightness: 63.1%;
|
||||
|
||||
--theme-nuance-color-4: hsl(213.1, 32.0%, 52.2%);
|
||||
--theme-nuance-color-4-hue: 213.1;
|
||||
--theme-nuance-color-4-saturation: 32.0%;
|
||||
--theme-nuance-color-4-lightness: 52.2%;
|
||||
|
||||
|
||||
|
||||
/* ----------- ROYGP COLORS ------------------ */
|
||||
--theme-red-color: hsl(354.3, 42.3%, 56.5%);
|
||||
--theme-orange-color: hsl(20, 85%, 50%);
|
||||
--theme-yellow-color: hsl(20, 75%, 45%);
|
||||
--theme-green-color: hsl( 92.4, 27.8%, 64.7%);
|
||||
--theme-purple-color: hsl(311.1, 20.2%, 63.1%);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------------ */
|
||||
--background-color-1: var(--primary-color-1);
|
||||
--background-color-2: var(--primary-color-2);
|
||||
--background-color-3: var(--primary-color-3);
|
||||
--background-color-4: var(--primary-color-4);
|
||||
|
||||
--border-color-1: var(--primary-color-2);
|
||||
--border-color-2: var(--primary-color-3);
|
||||
--border-color-3: var(--primary-color-4);
|
||||
|
||||
--border-focus-color: var(--theme-nuance-color-2);
|
||||
--border-focus-shadow: var(--theme-nuance-color-1);
|
||||
|
||||
--text-color-plain: var(--secondary-color-1);
|
||||
--text-color-subtile-1: var(--secondary-color-2);
|
||||
--text-color-subtile-2: var(--secondary-color-3);
|
||||
|
||||
--code-background-color: var(--secondary-color-2);
|
||||
--code-text-color: var(--primary-color-2);
|
||||
|
||||
--ui-range-thumb-color: var(--theme-nuance-color-3);
|
||||
--ui-range-thumb-border: var(--ui-ranger-thumb-color);
|
||||
|
||||
--textarea-border-color: var(--secondary-color-4);
|
||||
|
||||
--chat-id-color: var(--theme-nuance-color-4);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--button-alert-text-hover: var(--secondary-color-1);
|
||||
--button-alert-color-hover: var(--theme-yellow-color);
|
||||
--button-alert-border-hover: var(--theme-yellow-color);
|
||||
|
||||
--button-alert-text-active: var(--secondary-color-1);
|
||||
--button-alert-color-active: var(--theme-orange-color);
|
||||
--button-alert-border-active: var(--theme-orange-color);
|
||||
|
||||
|
||||
|
||||
/* ----------- PRIMARY BUTTONS --------------- */
|
||||
/* - button should immediately catch the eye - */
|
||||
--button-primary-text: var(--secondary-color-1);
|
||||
--button-primary-color: var(--theme-nuance-color-3);
|
||||
--button-primary-border: var(--theme-nuance-color-3);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-primary-text-hover:
|
||||
hsl(217.5,
|
||||
calc(var(--secondary-color-1-saturation) - 35%),
|
||||
calc(var(--secondary-color-1-lightness) + 30%));
|
||||
|
||||
--button-primary-color-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
--button-primary-border-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-primary-text-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 35%));
|
||||
|
||||
--button-primary-color-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 10%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 25%));
|
||||
|
||||
--button-primary-border-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 10%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 25%));
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY BUTTONS -------------- */
|
||||
/* these should NOT immediately catch the eye */
|
||||
--button-secondary-text:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 50%));
|
||||
|
||||
--button-secondary-color:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
--button-secondary-border:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-secondary-text-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 80%));
|
||||
|
||||
--button-secondary-color-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 22%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 1%));
|
||||
|
||||
--button-secondary-border-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 22%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 1%));
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-secondary-text-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 25%));
|
||||
|
||||
--button-secondary-color-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 30%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 15%));
|
||||
|
||||
--button-secondary-border-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 30%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 15%));
|
||||
|
||||
|
||||
|
||||
/* ---------- TERTIARY BUTTONS --------------- */
|
||||
/* ---------- disabled buttons --------------- */
|
||||
--button-tertiary-text:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
--button-tertiary-color:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
--button-tertiary-border:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-tertiary-text-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
--button-tertiary-color-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
--button-tertiary-border-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
}
|
||||
251
tools/server/public_legacy/theme-snowstorm.css
Executable file
@@ -0,0 +1,251 @@
|
||||
/* Author: Yazan Agha-Schrader */
|
||||
/* Inspiration from Nord Theme https://www.nordtheme.com/docs/colors-and-palettes */
|
||||
|
||||
.theme-snowstorm {
|
||||
|
||||
/* ---------- PRIMARY COLORS ----------------- */
|
||||
--primary-color-1: hsl(217.5, 26.7%, 94.1%);
|
||||
--primary-color-1-hue: 217.5;
|
||||
--primary-color-1-saturation: 26.7%;
|
||||
--primary-color-1-lightness: 94.1%;
|
||||
|
||||
--primary-color-2: hsl(218.2, 26.8%, 92.0%);
|
||||
--primary-color-2-hue: 218.2;
|
||||
--primary-color-2-saturation: 26.8%;
|
||||
--primary-color-2-lightness: 92.0%;
|
||||
|
||||
--primary-color-3: hsl(218.8, 27.9%, 88.0%);
|
||||
--primary-color-3-hue: 218.8;
|
||||
--primary-color-3-saturation: 27.9%;
|
||||
--primary-color-3-lightness: 88.0%;
|
||||
|
||||
--primary-color-4: hsl(218.8, 18.3%, 81.8%);
|
||||
--primary-color-4-hue: 218.8;
|
||||
--primary-color-4-saturation: 18.3%;
|
||||
--primary-color-4-lightness: 81.8%;
|
||||
|
||||
|
||||
/* ---------- SECONDARY COLORS --------------- */
|
||||
--secondary-color-1: hsl(220.0, 16.4%, 21.6%);
|
||||
--secondary-color-1-hue: 220.0;
|
||||
--secondary-color-1-saturation: 16.4%;
|
||||
--secondary-color-1-lightness: 21.6%;
|
||||
|
||||
--secondary-color-2: hsl(221.7, 16.3%, 27.6%);
|
||||
--secondary-color-2-hue: 221.7;
|
||||
--secondary-color-2-saturation: 16.3%;
|
||||
--secondary-color-2-lightness: 27.6%;
|
||||
|
||||
--secondary-color-3: hsl(220.0, 16.8%, 31.6%);
|
||||
--secondary-color-3-hue: 220.0;
|
||||
--secondary-color-3-saturation: 16.8%;
|
||||
--secondary-color-3-lightness: 31.6%;
|
||||
|
||||
--secondary-color-4: hsl(220.0, 16.5%, 35.7%);
|
||||
--secondary-color-4-hue: 220.0;
|
||||
--secondary-color-4-saturation: 16.5%;
|
||||
--secondary-color-4-lightness: 35.7%;
|
||||
|
||||
|
||||
|
||||
/* ----------- NUANCES COLORS ---------------- */
|
||||
--theme-nuance-color-1: hsl(178.7, 25.1%, 64.9%);
|
||||
--theme-nuance-color-1-hue: 178.7;
|
||||
--theme-nuance-color-1-saturation: 25.1%;
|
||||
--theme-nuance-color-1-lightness: 64.9%;
|
||||
|
||||
--theme-nuance-color-2: hsl(193.3, 43.4%, 67.5%);
|
||||
--theme-nuance-color-2-hue: 193.3;
|
||||
--theme-nuance-color-2-saturation: 43.4%;
|
||||
--theme-nuance-color-2-lightness: 67.5%;
|
||||
|
||||
--theme-nuance-color-3: hsl(210.0, 34.0%, 63.1%);
|
||||
--theme-nuance-color-3-hue: 210.0;
|
||||
--theme-nuance-color-3-saturation: 34.0%;
|
||||
--theme-nuance-color-3-lightness: 63.1%;
|
||||
|
||||
--theme-nuance-color-4: hsl(213.1, 32.0%, 52.2%);
|
||||
--theme-nuance-color-4-hue: 213.1;
|
||||
--theme-nuance-color-4-saturation: 32.0%;
|
||||
--theme-nuance-color-4-lightness: 52.2%;
|
||||
|
||||
|
||||
|
||||
/* ----------- ROYGP COLORS ------------------ */
|
||||
--theme-red-color: hsl(32.5, 80%, 50%);
|
||||
--theme-orange-color: hsl(32.5, 70%, 45%);
|
||||
--theme-yellow-color: hsl(40.0, 0.6%, 73.3%);
|
||||
--theme-green-color: hsl(92.4, 27.8%, 64.7%);
|
||||
--theme-purple-color: hsl(311.1, 20.2%, 63.1%);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--background-color-1: var(--primary-color-1);
|
||||
--background-color-2: var(--primary-color-2);
|
||||
--background-color-3: var(--primary-color-3);
|
||||
--background-color-4: var(--primary-color-4);
|
||||
|
||||
--border-color-1: var(--primary-color-2);
|
||||
--border-color-2: var(--primary-color-3);
|
||||
--border-color-3: var(--primary-color-4);
|
||||
|
||||
--border-focus-color: var(--theme-nuance-color-2);
|
||||
--border-focus-shadow: var(--theme-nuance-color-1);
|
||||
|
||||
--text-color-plain: var(--secondary-color-1);
|
||||
--text-color-subtile-1: var(--secondary-color-2);
|
||||
--text-color-subtile-2: var(--secondary-color-3);
|
||||
|
||||
--code-background-color: var(--secondary-color-2);
|
||||
--code-text-color: var(--primary-color-2);
|
||||
|
||||
--ui-range-thumb-color: var(--theme-nuance-color-3);
|
||||
--ui-range-thumb-border: var(--ui-ranger-thumb-color);
|
||||
|
||||
--textarea-border-color: var(--secondary-color-4);
|
||||
|
||||
--chat-id-color: var(--theme-nuance-color-4);
|
||||
|
||||
|
||||
|
||||
/* ------------------------------------------- */
|
||||
--button-alert-text-hover: var(--primary-color-1);
|
||||
--button-alert-color-hover: var(--theme-orange-color);
|
||||
--button-alert-border-hover: var(--theme-orange-color);
|
||||
|
||||
--button-alert-text-active: var(--primary-color-1);
|
||||
--button-alert-color-active: var(--theme-red-color);
|
||||
--button-alert-border-active: var(--theme-red-color);
|
||||
|
||||
|
||||
|
||||
/* ----------- PRIMARY BUTTONS --------------- */
|
||||
/* - button should immediately catch the eye - */
|
||||
--button-primary-text: var(--secondary-color-1);
|
||||
--button-primary-color: var(--theme-nuance-color-3);
|
||||
--button-primary-border: var(--theme-nuance-color-3);
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-primary-text-hover:
|
||||
hsl(217.5,
|
||||
calc(var(--secondary-color-1-saturation) + 35%),
|
||||
calc(var(--secondary-color-1-lightness) - 30%));
|
||||
|
||||
--button-primary-color-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
--button-primary-border-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 2%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 10%));
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-primary-text-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 35%));
|
||||
|
||||
--button-primary-color-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 10%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 25%));
|
||||
|
||||
--button-primary-border-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 10%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 25%));
|
||||
|
||||
|
||||
|
||||
/* ---------- SECONDARY BUTTONS -------------- */
|
||||
/* these should NOT immediately catch the eye */
|
||||
--button-secondary-text:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 50%));
|
||||
|
||||
--button-secondary-color:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
--button-secondary-border:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 10%));
|
||||
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-secondary-text-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 20%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 80%));
|
||||
|
||||
--button-secondary-color-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 22%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 1%));
|
||||
|
||||
--button-secondary-border-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 22%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 1%));
|
||||
|
||||
|
||||
/* ---------active--------- */
|
||||
--button-secondary-text-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) + 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 55%));
|
||||
|
||||
--button-secondary-color-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 30%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
--button-secondary-border-active:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 30%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
|
||||
|
||||
/* ---------- TERTIARY BUTTONS --------------- */
|
||||
/* ---------- disabled buttons --------------- */
|
||||
--button-tertiary-text:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
--button-tertiary-color:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
--button-tertiary-border:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
/* ---------hover---------- */
|
||||
--button-tertiary-text-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) - 5%));
|
||||
|
||||
--button-tertiary-color-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
--button-tertiary-border-hover:
|
||||
hsl(210,
|
||||
calc(var(--theme-nuance-color-3-saturation) - 40%),
|
||||
calc(var(--theme-nuance-color-3-lightness) + 20%));
|
||||
|
||||
}
|
||||
266
tools/server/public_simplechat/datautils.mjs
Normal file
@@ -0,0 +1,266 @@
|
||||
//@ts-check
|
||||
// Helpers to work with different data types
|
||||
// by Humans for All
|
||||
//
|
||||
|
||||
/**
|
||||
* Given the limited context size of local LLMs and , many a times when context gets filled
|
||||
* between the prompt and the response, it can lead to repeating text garbage generation.
|
||||
* And many a times setting penalty wrt repeatation leads to over-intelligent garbage
|
||||
* repeatation with slight variations. These garbage inturn can lead to overloading of the
|
||||
* available model context, leading to less valuable response for subsequent prompts/queries,
|
||||
* if chat history is sent to ai model.
|
||||
*
|
||||
* So two simple minded garbage trimming logics are experimented below.
|
||||
* * one based on progressively-larger-substring-based-repeat-matching-with-partial-skip and
|
||||
* * another based on char-histogram-driven garbage trimming.
|
||||
* * in future characteristic of histogram over varying lengths could be used to allow for
|
||||
* a more aggressive and adaptive trimming logic.
|
||||
*/
|
||||
|
||||
|
||||
/**
|
||||
* Simple minded logic to help remove repeating garbage at end of the string.
|
||||
* The repeatation needs to be perfectly matching.
|
||||
*
|
||||
* The logic progressively goes on probing for longer and longer substring based
|
||||
* repeatation, till there is no longer repeatation. Inturn picks the one with
|
||||
* the longest chain.
|
||||
*
|
||||
* @param {string} sIn
|
||||
* @param {number} maxSubL
|
||||
* @param {number} maxMatchLenThreshold
|
||||
*/
|
||||
export function trim_repeat_garbage_at_end(sIn, maxSubL=10, maxMatchLenThreshold=40) {
|
||||
let rCnt = [0];
|
||||
let maxMatchLen = maxSubL;
|
||||
let iMML = -1;
|
||||
for(let subL=1; subL < maxSubL; subL++) {
|
||||
rCnt.push(0);
|
||||
let i;
|
||||
let refS = sIn.substring(sIn.length-subL, sIn.length);
|
||||
for(i=sIn.length; i > 0; i -= subL) {
|
||||
let curS = sIn.substring(i-subL, i);
|
||||
if (refS != curS) {
|
||||
let curMatchLen = rCnt[subL]*subL;
|
||||
if (maxMatchLen < curMatchLen) {
|
||||
maxMatchLen = curMatchLen;
|
||||
iMML = subL;
|
||||
}
|
||||
break;
|
||||
}
|
||||
rCnt[subL] += 1;
|
||||
}
|
||||
}
|
||||
console.debug("DBUG:DU:TrimRepeatGarbage:", rCnt);
|
||||
if ((iMML == -1) || (maxMatchLen < maxMatchLenThreshold)) {
|
||||
return {trimmed: false, data: sIn};
|
||||
}
|
||||
console.debug("DBUG:TrimRepeatGarbage:TrimmedCharLen:", maxMatchLen);
|
||||
let iEnd = sIn.length - maxMatchLen;
|
||||
return { trimmed: true, data: sIn.substring(0, iEnd) };
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Simple minded logic to help remove repeating garbage at end of the string, till it cant.
|
||||
* If its not able to trim, then it will try to skip a char at end and then trim, a few times.
|
||||
* This ensures that even if there are multiple runs of garbage with different patterns, the
|
||||
* logic still tries to munch through them.
|
||||
*
|
||||
* @param {string} sIn
|
||||
* @param {number} maxSubL
|
||||
* @param {number | undefined} [maxMatchLenThreshold]
|
||||
*/
|
||||
export function trim_repeat_garbage_at_end_loop(sIn, maxSubL, maxMatchLenThreshold, skipMax=16) {
|
||||
let sCur = sIn;
|
||||
let sSaved = "";
|
||||
let iTry = 0;
|
||||
while(true) {
|
||||
let got = trim_repeat_garbage_at_end(sCur, maxSubL, maxMatchLenThreshold);
|
||||
if (got.trimmed != true) {
|
||||
if (iTry == 0) {
|
||||
sSaved = got.data;
|
||||
}
|
||||
iTry += 1;
|
||||
if (iTry >= skipMax) {
|
||||
return sSaved;
|
||||
}
|
||||
got.data = got.data.substring(0,got.data.length-1);
|
||||
} else {
|
||||
iTry = 0;
|
||||
}
|
||||
sCur = got.data;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* A simple minded try trim garbage at end using histogram driven characteristics.
|
||||
* There can be variation in the repeatations, as long as no new char props up.
|
||||
*
|
||||
* This tracks the chars and their frequency in a specified length of substring at the end
|
||||
* and inturn checks if moving further into the generated text from the end remains within
|
||||
* the same char subset or goes beyond it and based on that either trims the string at the
|
||||
* end or not. This allows to filter garbage at the end, including even if there are certain
|
||||
* kind of small variations in the repeated text wrt position of seen chars.
|
||||
*
|
||||
* Allow the garbage to contain upto maxUniq chars, but at the same time ensure that
|
||||
* a given type of char ie numerals or alphabets or other types dont cross the specified
|
||||
* maxType limit. This allows intermixed text garbage to be identified and trimmed.
|
||||
*
|
||||
* ALERT: This is not perfect and only provides a rough garbage identification logic.
|
||||
* Also it currently only differentiates between character classes wrt english.
|
||||
*
|
||||
* @param {string} sIn
|
||||
* @param {number} maxType
|
||||
* @param {number} maxUniq
|
||||
* @param {number} maxMatchLenThreshold
|
||||
*/
|
||||
export function trim_hist_garbage_at_end(sIn, maxType, maxUniq, maxMatchLenThreshold) {
|
||||
if (sIn.length < maxMatchLenThreshold) {
|
||||
return { trimmed: false, data: sIn };
|
||||
}
|
||||
let iAlp = 0;
|
||||
let iNum = 0;
|
||||
let iOth = 0;
|
||||
// Learn
|
||||
let hist = {};
|
||||
let iUniq = 0;
|
||||
for(let i=0; i<maxMatchLenThreshold; i++) {
|
||||
let c = sIn[sIn.length-1-i];
|
||||
if (c in hist) {
|
||||
hist[c] += 1;
|
||||
} else {
|
||||
if(c.match(/[0-9]/) != null) {
|
||||
iNum += 1;
|
||||
} else if(c.match(/[A-Za-z]/) != null) {
|
||||
iAlp += 1;
|
||||
} else {
|
||||
iOth += 1;
|
||||
}
|
||||
iUniq += 1;
|
||||
if (iUniq >= maxUniq) {
|
||||
break;
|
||||
}
|
||||
hist[c] = 1;
|
||||
}
|
||||
}
|
||||
console.debug("DBUG:TrimHistGarbage:", hist);
|
||||
if ((iAlp > maxType) || (iNum > maxType) || (iOth > maxType)) {
|
||||
return { trimmed: false, data: sIn };
|
||||
}
|
||||
// Catch and Trim
|
||||
for(let i=0; i < sIn.length; i++) {
|
||||
let c = sIn[sIn.length-1-i];
|
||||
if (!(c in hist)) {
|
||||
if (i < maxMatchLenThreshold) {
|
||||
return { trimmed: false, data: sIn };
|
||||
}
|
||||
console.debug("DBUG:TrimHistGarbage:TrimmedCharLen:", i);
|
||||
return { trimmed: true, data: sIn.substring(0, sIn.length-i+1) };
|
||||
}
|
||||
}
|
||||
console.debug("DBUG:TrimHistGarbage:Trimmed fully");
|
||||
return { trimmed: true, data: "" };
|
||||
}
|
||||
|
||||
/**
|
||||
* Keep trimming repeatedly using hist_garbage logic, till you no longer can.
|
||||
* This ensures that even if there are multiple runs of garbage with different patterns,
|
||||
* the logic still tries to munch through them.
|
||||
*
|
||||
* @param {any} sIn
|
||||
* @param {number} maxType
|
||||
* @param {number} maxUniq
|
||||
* @param {number} maxMatchLenThreshold
|
||||
*/
|
||||
export function trim_hist_garbage_at_end_loop(sIn, maxType, maxUniq, maxMatchLenThreshold) {
|
||||
let sCur = sIn;
|
||||
while (true) {
|
||||
let got = trim_hist_garbage_at_end(sCur, maxType, maxUniq, maxMatchLenThreshold);
|
||||
if (!got.trimmed) {
|
||||
return got.data;
|
||||
}
|
||||
sCur = got.data;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Try trim garbage at the end by using both the hist-driven-garbage-trimming as well as
|
||||
* skip-a-bit-if-reqd-then-repeat-pattern-based-garbage-trimming, with blind retrying.
|
||||
* @param {string} sIn
|
||||
*/
|
||||
export function trim_garbage_at_end(sIn) {
|
||||
let sCur = sIn;
|
||||
for(let i=0; i<2; i++) {
|
||||
sCur = trim_hist_garbage_at_end_loop(sCur, 8, 24, 72);
|
||||
sCur = trim_repeat_garbage_at_end_loop(sCur, 32, 72, 12);
|
||||
}
|
||||
return sCur;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* NewLines array helper.
|
||||
* Allow for maintaining a list of lines.
|
||||
* Allow for a line to be builtup/appended part by part.
|
||||
*/
|
||||
export class NewLines {
|
||||
|
||||
constructor() {
|
||||
/** @type {string[]} */
|
||||
this.lines = [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts lines from the passed string and inturn either
|
||||
* append to a previous partial line or add a new line.
|
||||
* @param {string} sLines
|
||||
*/
|
||||
add_append(sLines) {
|
||||
let aLines = sLines.split("\n");
|
||||
let lCnt = 0;
|
||||
for(let line of aLines) {
|
||||
lCnt += 1;
|
||||
// Add back newline removed if any during split
|
||||
if (lCnt < aLines.length) {
|
||||
line += "\n";
|
||||
} else {
|
||||
if (sLines.endsWith("\n")) {
|
||||
line += "\n";
|
||||
}
|
||||
}
|
||||
// Append if required
|
||||
if (lCnt == 1) {
|
||||
let lastLine = this.lines[this.lines.length-1];
|
||||
if (lastLine != undefined) {
|
||||
if (!lastLine.endsWith("\n")) {
|
||||
this.lines[this.lines.length-1] += line;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Add new line
|
||||
this.lines.push(line);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Shift the oldest/earliest/0th line in the array. [Old-New|Earliest-Latest]
|
||||
* Optionally control whether only full lines (ie those with newline at end) will be returned
|
||||
* or will a partial line without a newline at end (can only be the last line) be returned.
|
||||
* @param {boolean} bFullWithNewLineOnly
|
||||
*/
|
||||
shift(bFullWithNewLineOnly=true) {
|
||||
let line = this.lines[0];
|
||||
if (line == undefined) {
|
||||
return undefined;
|
||||
}
|
||||
if ((line[line.length-1] != "\n") && bFullWithNewLineOnly){
|
||||
return undefined;
|
||||
}
|
||||
return this.lines.shift();
|
||||
}
|
||||
|
||||
}
|
||||
51
tools/server/public_simplechat/index.html
Normal file
@@ -0,0 +1,51 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<title>SimpleChat LlamaCppEtal </title>
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<meta name="message" content="Save Nature Save Earth" />
|
||||
<meta name="description" content="SimpleChat: trigger LLM web service endpoints /chat/completions and /completions, single/multi chat sessions" />
|
||||
<meta name="author" content="by Humans for All" />
|
||||
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
|
||||
<script type="importmap">
|
||||
{
|
||||
"imports": {
|
||||
"datautils": "./datautils.mjs",
|
||||
"ui": "./ui.mjs"
|
||||
}
|
||||
}
|
||||
</script>
|
||||
<script src="simplechat.js" type="module" defer></script>
|
||||
<link rel="stylesheet" href="simplechat.css" />
|
||||
</head>
|
||||
<body>
|
||||
<div class="samecolumn" id="fullbody">
|
||||
|
||||
<div class="sameline" id="heading">
|
||||
<p class="heading flex-grow" > <b> SimpleChat </b> </p>
|
||||
<button id="settings">Settings</button>
|
||||
</div>
|
||||
|
||||
<div id="sessions-div" class="sameline"></div>
|
||||
|
||||
<hr>
|
||||
<div class="sameline">
|
||||
<label for="system-in">System</label>
|
||||
<textarea name="system" id="system-in" rows="2" placeholder="e.g. you are a helpful ai assistant, who provides concise answers" class="flex-grow"></textarea>
|
||||
</div>
|
||||
|
||||
<hr>
|
||||
<div id="chat-div">
|
||||
<p> You need to have javascript enabled.</p>
|
||||
</div>
|
||||
|
||||
<hr>
|
||||
<div class="sameline">
|
||||
<textarea id="user-in" class="flex-grow" rows="2" placeholder="enter your query to the ai model here" ></textarea>
|
||||
<button id="user-btn">submit</button>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
286
tools/server/public_simplechat/readme.md
Normal file
@@ -0,0 +1,286 @@
|
||||
|
||||
# SimpleChat
|
||||
|
||||
by Humans for All.
|
||||
|
||||
## quickstart
|
||||
|
||||
To run from the build dir
|
||||
|
||||
bin/llama-server -m path/model.gguf --path ../tools/server/public_simplechat
|
||||
|
||||
Continue reading for the details.
|
||||
|
||||
## overview
|
||||
|
||||
This simple web frontend, allows triggering/testing the server's /completions or /chat/completions endpoints
|
||||
in a simple way with minimal code from a common code base. Inturn additionally it tries to allow single or
|
||||
multiple independent back and forth chatting to an extent, with the ai llm model at a basic level, with their
|
||||
own system prompts.
|
||||
|
||||
This allows seeing the generated text / ai-model response in oneshot at the end, after it is fully generated,
|
||||
or potentially as it is being generated, in a streamed manner from the server/ai-model.
|
||||
|
||||

|
||||
|
||||
Auto saves the chat session locally as and when the chat is progressing and inturn at a later time when you
|
||||
open SimpleChat, option is provided to restore the old chat session, if a matching one exists.
|
||||
|
||||
The UI follows a responsive web design so that the layout can adapt to available display space in a usable
|
||||
enough manner, in general.
|
||||
|
||||
Allows developer/end-user to control some of the behaviour by updating gMe members from browser's devel-tool
|
||||
console. Parallely some of the directly useful to end-user settings can also be changed using the provided
|
||||
settings ui.
|
||||
|
||||
NOTE: Current web service api doesnt expose the model context length directly, so client logic doesnt provide
|
||||
any adaptive culling of old messages nor of replacing them with summary of their content etal. However there
|
||||
is a optional sliding window based chat logic, which provides a simple minded culling of old messages from
|
||||
the chat history before sending to the ai model.
|
||||
|
||||
NOTE: Wrt options sent with the request, it mainly sets temperature, max_tokens and optionaly stream for now.
|
||||
However if someone wants they can update the js file or equivalent member in gMe as needed.
|
||||
|
||||
NOTE: One may be able to use this to chat with openai api web-service /chat/completions endpoint, in a very
|
||||
limited / minimal way. One will need to set model, openai url and authorization bearer key in settings ui.
|
||||
|
||||
|
||||
## usage
|
||||
|
||||
One could run this web frontend directly using server itself or if anyone is thinking of adding a built in web
|
||||
frontend to configure the server over http(s) or so, then run this web frontend using something like python's
|
||||
http module.
|
||||
|
||||
### running using tools/server
|
||||
|
||||
./llama-server -m path/model.gguf --path tools/server/public_simplechat [--port PORT]
|
||||
|
||||
### running using python3's server module
|
||||
|
||||
first run tools/server
|
||||
* ./llama-server -m path/model.gguf
|
||||
|
||||
next run this web front end in tools/server/public_simplechat
|
||||
* cd ../tools/server/public_simplechat
|
||||
* python3 -m http.server PORT
|
||||
|
||||
### using the front end
|
||||
|
||||
Open this simple web front end from your local browser
|
||||
|
||||
* http://127.0.0.1:PORT/index.html
|
||||
|
||||
Once inside
|
||||
|
||||
* If you want to, you can change many of the default global settings
|
||||
* the base url (ie ip addr / domain name, port)
|
||||
* chat (default) vs completion mode
|
||||
* try trim garbage in response or not
|
||||
* amount of chat history in the context sent to server/ai-model
|
||||
* oneshot or streamed mode.
|
||||
|
||||
* In completion mode
|
||||
* one normally doesnt use a system prompt in completion mode.
|
||||
* logic by default doesnt insert any role specific "ROLE: " prefix wrt each role's message.
|
||||
If the model requires any prefix wrt user role messages, then the end user has to
|
||||
explicitly add the needed prefix, when they enter their chat message.
|
||||
Similarly if the model requires any prefix to trigger assistant/ai-model response,
|
||||
then the end user needs to enter the same.
|
||||
This keeps the logic simple, while still giving flexibility to the end user to
|
||||
manage any templating/tagging requirement wrt their messages to the model.
|
||||
* the logic doesnt insert newline at the begining and end wrt the prompt message generated.
|
||||
However if the chat being sent to /completions end point has more than one role's message,
|
||||
then insert newline when moving from one role's message to the next role's message, so
|
||||
that it can be clearly identified/distinguished.
|
||||
* given that /completions endpoint normally doesnt add additional chat-templating of its
|
||||
own, the above ensures that end user can create a custom single/multi message combo with
|
||||
any tags/special-tokens related chat templating to test out model handshake. Or enduser
|
||||
can use it just for normal completion related/based query.
|
||||
|
||||
* If you want to provide a system prompt, then ideally enter it first, before entering any user query.
|
||||
Normally Completion mode doesnt need system prompt, while Chat mode can generate better/interesting
|
||||
responses with a suitable system prompt.
|
||||
* if chat.add_system_begin is used
|
||||
* you cant change the system prompt, after it is has been submitted once along with user query.
|
||||
* you cant set a system prompt, after you have submitted any user query
|
||||
* if chat.add_system_anytime is used
|
||||
* one can change the system prompt any time during chat, by changing the contents of system prompt.
|
||||
* inturn the updated/changed system prompt will be inserted into the chat session.
|
||||
* this allows for the subsequent user chatting to be driven by the new system prompt set above.
|
||||
|
||||
* Enter your query and either press enter or click on the submit button.
|
||||
If you want to insert enter (\n) as part of your chat/query to ai model, use shift+enter.
|
||||
|
||||
* Wait for the logic to communicate with the server and get the response.
|
||||
* the user is not allowed to enter any fresh query during this time.
|
||||
* the user input box will be disabled and a working message will be shown in it.
|
||||
* if trim garbage is enabled, the logic will try to trim repeating text kind of garbage to some extent.
|
||||
|
||||
* just refresh the page, to reset wrt the chat history and or system prompt and start afresh.
|
||||
|
||||
* Using NewChat one can start independent chat sessions.
|
||||
* two independent chat sessions are setup by default.
|
||||
|
||||
* When you want to print, switching ChatHistoryInCtxt to Full and clicking on the chat session button of
|
||||
interest, will display the full chat history till then wrt same, if you want full history for printing.
|
||||
|
||||
|
||||
## Devel note
|
||||
|
||||
### Reason behind this
|
||||
|
||||
The idea is to be easy enough to use for basic purposes, while also being simple and easily discernable
|
||||
by developers who may not be from web frontend background (so inturn may not be familiar with template /
|
||||
end-use-specific-language-extensions driven flows) so that they can use it to explore/experiment things.
|
||||
|
||||
And given that the idea is also to help explore/experiment for developers, some flexibility is provided
|
||||
to change behaviour easily using the devel-tools/console or provided minimal settings ui (wrt few aspects).
|
||||
Skeletal logic has been implemented to explore some of the end points and ideas/implications around them.
|
||||
|
||||
|
||||
### General
|
||||
|
||||
Me/gMe consolidates the settings which control the behaviour into one object.
|
||||
One can see the current settings, as well as change/update them using browsers devel-tool/console.
|
||||
It is attached to the document object. Some of these can also be updated using the Settings UI.
|
||||
|
||||
baseURL - the domain-name/ip-address and inturn the port to send the request.
|
||||
|
||||
bStream - control between oneshot-at-end and live-stream-as-its-generated collating and showing
|
||||
of the generated response.
|
||||
|
||||
the logic assumes that the text sent from the server follows utf-8 encoding.
|
||||
|
||||
in streaming mode - if there is any exception, the logic traps the same and tries to ensure
|
||||
that text generated till then is not lost.
|
||||
|
||||
if a very long text is being generated, which leads to no user interaction for sometime and
|
||||
inturn the machine goes into power saving mode or so, the platform may stop network connection,
|
||||
leading to exception.
|
||||
|
||||
apiEP - select between /completions and /chat/completions endpoint provided by the server/ai-model.
|
||||
|
||||
bCompletionFreshChatAlways - whether Completion mode collates complete/sliding-window history when
|
||||
communicating with the server or only sends the latest user query/message.
|
||||
|
||||
bCompletionInsertStandardRolePrefix - whether Completion mode inserts role related prefix wrt the
|
||||
messages that get inserted into prompt field wrt /Completion endpoint.
|
||||
|
||||
bTrimGarbage - whether garbage repeatation at the end of the generated ai response, should be
|
||||
trimmed or left as is. If enabled, it will be trimmed so that it wont be sent back as part of
|
||||
subsequent chat history. At the same time the actual trimmed text is shown to the user, once
|
||||
when it was generated, so user can check if any useful info/data was there in the response.
|
||||
|
||||
One may be able to request the ai-model to continue (wrt the last response) (if chat-history
|
||||
is enabled as part of the chat-history-in-context setting), and chances are the ai-model will
|
||||
continue starting from the trimmed part, thus allows long response to be recovered/continued
|
||||
indirectly, in many cases.
|
||||
|
||||
The histogram/freq based trimming logic is currently tuned for english language wrt its
|
||||
is-it-a-alpabetic|numeral-char regex match logic.
|
||||
|
||||
apiRequestOptions - maintains the list of options/fields to send along with api request,
|
||||
irrespective of whether /chat/completions or /completions endpoint.
|
||||
|
||||
If you want to add additional options/fields to send to the server/ai-model, and or
|
||||
modify the existing options value or remove them, for now you can update this global var
|
||||
using browser's development-tools/console.
|
||||
|
||||
For string, numeric and boolean fields in apiRequestOptions, including even those added by a
|
||||
user at runtime by directly modifying gMe.apiRequestOptions, setting ui entries will be auto
|
||||
created.
|
||||
|
||||
cache_prompt option supported by example/server is allowed to be controlled by user, so that
|
||||
any caching supported wrt system-prompt and chat history, if usable can get used. When chat
|
||||
history sliding window is enabled, cache_prompt logic may or may not kick in at the backend
|
||||
wrt same, based on aspects related to model, positional encoding, attention mechanism etal.
|
||||
However system prompt should ideally get the benefit of caching.
|
||||
|
||||
headers - maintains the list of http headers sent when request is made to the server. By default
|
||||
Content-Type is set to application/json. Additionally Authorization entry is provided, which can
|
||||
be set if needed using the settings ui.
|
||||
|
||||
iRecentUserMsgCnt - a simple minded SlidingWindow to limit context window load at Ai Model end.
|
||||
This is disabled by default. However if enabled, then in addition to latest system message, only
|
||||
the last/latest iRecentUserMsgCnt user messages after the latest system prompt and its responses
|
||||
from the ai model will be sent to the ai-model, when querying for a new response. IE if enabled,
|
||||
only user messages after the latest system message/prompt will be considered.
|
||||
|
||||
This specified sliding window user message count also includes the latest user query.
|
||||
<0 : Send entire chat history to server
|
||||
0 : Send only the system message if any to the server
|
||||
>0 : Send the latest chat history from the latest system prompt, limited to specified cnt.
|
||||
|
||||
|
||||
By using gMe's iRecentUserMsgCnt and apiRequestOptions.max_tokens/n_predict one can try to control
|
||||
the implications of loading of the ai-model's context window by chat history, wrt chat response to
|
||||
some extent in a simple crude way. You may also want to control the context size enabled when the
|
||||
server loads ai-model, on the server end.
|
||||
|
||||
|
||||
Sometimes the browser may be stuborn with caching of the file, so your updates to html/css/js
|
||||
may not be visible. Also remember that just refreshing/reloading page in browser or for that
|
||||
matter clearing site data, dont directly override site caching in all cases. Worst case you may
|
||||
have to change port. Or in dev tools of browser, you may be able to disable caching fully.
|
||||
|
||||
|
||||
Currently the server to communicate with is maintained globally and not as part of a specific
|
||||
chat session. So if one changes the server ip/url in setting, then all chat sessions will auto
|
||||
switch to this new server, when you try using those sessions.
|
||||
|
||||
|
||||
By switching between chat.add_system_begin/anytime, one can control whether one can change
|
||||
the system prompt, anytime during the conversation or only at the beginning.
|
||||
|
||||
|
||||
### Default setup
|
||||
|
||||
By default things are setup to try and make the user experience a bit better, if possible.
|
||||
However a developer when testing the server of ai-model may want to change these value.
|
||||
|
||||
Using iRecentUserMsgCnt reduce chat history context sent to the server/ai-model to be
|
||||
just the system-prompt, prev-user-request-and-ai-response and cur-user-request, instead of
|
||||
full chat history. This way if there is any response with garbage/repeatation, it doesnt
|
||||
mess with things beyond the next question/request/query, in some ways. The trim garbage
|
||||
option also tries to help avoid issues with garbage in the context to an extent.
|
||||
|
||||
Set max_tokens to 1024, so that a relatively large previous reponse doesnt eat up the space
|
||||
available wrt next query-response. However dont forget that the server when started should
|
||||
also be started with a model context size of 1k or more, to be on safe side.
|
||||
|
||||
The /completions endpoint of tools/server doesnt take max_tokens, instead it takes the
|
||||
internal n_predict, for now add the same here on the client side, maybe later add max_tokens
|
||||
to /completions endpoint handling code on server side.
|
||||
|
||||
NOTE: One may want to experiment with frequency/presence penalty fields in apiRequestOptions
|
||||
wrt the set of fields sent to server along with the user query, to check how the model behaves
|
||||
wrt repeatations in general in the generated text response.
|
||||
|
||||
A end-user can change these behaviour by editing gMe from browser's devel-tool/console or by
|
||||
using the provided settings ui (for settings exposed through the ui).
|
||||
|
||||
|
||||
### OpenAi / Equivalent API WebService
|
||||
|
||||
One may be abe to handshake with OpenAI/Equivalent api web service's /chat/completions endpoint
|
||||
for a minimal chatting experimentation by setting the below.
|
||||
|
||||
* the baseUrl in settings ui
|
||||
* https://api.openai.com/v1 or similar
|
||||
|
||||
* Wrt request body - gMe.apiRequestOptions
|
||||
* model (settings ui)
|
||||
* any additional fields if required in future
|
||||
|
||||
* Wrt request headers - gMe.headers
|
||||
* Authorization (available through settings ui)
|
||||
* Bearer THE_OPENAI_API_KEY
|
||||
* any additional optional header entries like "OpenAI-Organization", "OpenAI-Project" or so
|
||||
|
||||
NOTE: Not tested, as there is no free tier api testing available. However logically this might
|
||||
work.
|
||||
|
||||
|
||||
## At the end
|
||||
|
||||
Also a thank you to all open source and open model developers, who strive for the common good.
|
||||
79
tools/server/public_simplechat/simplechat.css
Normal file
@@ -0,0 +1,79 @@
|
||||
/**
|
||||
* the styling of the simplechat web frontend
|
||||
* by Humans for All
|
||||
*/
|
||||
|
||||
#fullbody {
|
||||
height: 98vh;
|
||||
}
|
||||
|
||||
.heading {
|
||||
background-color: lightgray;
|
||||
}
|
||||
|
||||
.session-selected {
|
||||
background-color: lightblue;
|
||||
}
|
||||
|
||||
.role-system {
|
||||
background-color: lightblue;
|
||||
}
|
||||
.role-user {
|
||||
background-color: lightgray;
|
||||
}
|
||||
.role-trim {
|
||||
background-color: lightpink;
|
||||
}
|
||||
|
||||
.gridx2 {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(2, 1fr);
|
||||
border-bottom-style: dotted;
|
||||
border-bottom-width: thin;
|
||||
border-bottom-color: lightblue;
|
||||
}
|
||||
|
||||
.flex-grow {
|
||||
flex-grow: 1;
|
||||
}
|
||||
.float-right {
|
||||
float: right;
|
||||
}
|
||||
|
||||
#chat-div {
|
||||
overflow: scroll;
|
||||
flex-grow: 1;
|
||||
flex-shrink: 1;
|
||||
min-height: 40vh;
|
||||
}
|
||||
button {
|
||||
min-width: 8vw;
|
||||
}
|
||||
|
||||
.sameline {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
}
|
||||
.samecolumn {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.ul1 {
|
||||
padding-inline-start: 2vw;
|
||||
}
|
||||
.ul2 {
|
||||
padding-inline-start: 2vw;
|
||||
}
|
||||
|
||||
* {
|
||||
margin: 0.6vmin;
|
||||
}
|
||||
|
||||
@media print {
|
||||
|
||||
#fullbody {
|
||||
height: auto;
|
||||
}
|
||||
|
||||
}
|
||||
929
tools/server/public_simplechat/simplechat.js
Normal file
@@ -0,0 +1,929 @@
|
||||
// @ts-check
|
||||
// A simple completions and chat/completions test related web front end logic
|
||||
// by Humans for All
|
||||
|
||||
import * as du from "./datautils.mjs";
|
||||
import * as ui from "./ui.mjs"
|
||||
|
||||
class Roles {
|
||||
static System = "system";
|
||||
static User = "user";
|
||||
static Assistant = "assistant";
|
||||
}
|
||||
|
||||
class ApiEP {
|
||||
static Type = {
|
||||
Chat: "chat",
|
||||
Completion: "completion",
|
||||
}
|
||||
static UrlSuffix = {
|
||||
'chat': `/chat/completions`,
|
||||
'completion': `/completions`,
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the url from given baseUrl and apiEp id.
|
||||
* @param {string} baseUrl
|
||||
* @param {string} apiEP
|
||||
*/
|
||||
static Url(baseUrl, apiEP) {
|
||||
if (baseUrl.endsWith("/")) {
|
||||
baseUrl = baseUrl.substring(0, baseUrl.length-1);
|
||||
}
|
||||
return `${baseUrl}${this.UrlSuffix[apiEP]}`;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
let gUsageMsg = `
|
||||
<p class="role-system">Usage</p>
|
||||
<ul class="ul1">
|
||||
<li> System prompt above, to try control ai response characteristics.</li>
|
||||
<ul class="ul2">
|
||||
<li> Completion mode - no system prompt normally.</li>
|
||||
</ul>
|
||||
<li> Use shift+enter for inserting enter/newline.</li>
|
||||
<li> Enter your query to ai assistant below.</li>
|
||||
<li> Default ContextWindow = [System, Last Query+Resp, Cur Query].</li>
|
||||
<ul class="ul2">
|
||||
<li> ChatHistInCtxt, MaxTokens, ModelCtxt window to expand</li>
|
||||
</ul>
|
||||
</ul>
|
||||
`;
|
||||
|
||||
|
||||
/** @typedef {{role: string, content: string}[]} ChatMessages */
|
||||
|
||||
/** @typedef {{iLastSys: number, xchat: ChatMessages}} SimpleChatODS */
|
||||
|
||||
class SimpleChat {
|
||||
|
||||
/**
|
||||
* @param {string} chatId
|
||||
*/
|
||||
constructor(chatId) {
|
||||
this.chatId = chatId;
|
||||
/**
|
||||
* Maintain in a form suitable for common LLM web service chat/completions' messages entry
|
||||
* @type {ChatMessages}
|
||||
*/
|
||||
this.xchat = [];
|
||||
this.iLastSys = -1;
|
||||
this.latestResponse = "";
|
||||
}
|
||||
|
||||
clear() {
|
||||
this.xchat = [];
|
||||
this.iLastSys = -1;
|
||||
}
|
||||
|
||||
ods_key() {
|
||||
return `SimpleChat-${this.chatId}`
|
||||
}
|
||||
|
||||
save() {
|
||||
/** @type {SimpleChatODS} */
|
||||
let ods = {iLastSys: this.iLastSys, xchat: this.xchat};
|
||||
localStorage.setItem(this.ods_key(), JSON.stringify(ods));
|
||||
}
|
||||
|
||||
load() {
|
||||
let sods = localStorage.getItem(this.ods_key());
|
||||
if (sods == null) {
|
||||
return;
|
||||
}
|
||||
/** @type {SimpleChatODS} */
|
||||
let ods = JSON.parse(sods);
|
||||
this.iLastSys = ods.iLastSys;
|
||||
this.xchat = ods.xchat;
|
||||
}
|
||||
|
||||
/**
|
||||
* Recent chat messages.
|
||||
* If iRecentUserMsgCnt < 0
|
||||
* Then return the full chat history
|
||||
* Else
|
||||
* Return chat messages from latest going back till the last/latest system prompt.
|
||||
* While keeping track that the number of user queries/messages doesnt exceed iRecentUserMsgCnt.
|
||||
* @param {number} iRecentUserMsgCnt
|
||||
*/
|
||||
recent_chat(iRecentUserMsgCnt) {
|
||||
if (iRecentUserMsgCnt < 0) {
|
||||
return this.xchat;
|
||||
}
|
||||
if (iRecentUserMsgCnt == 0) {
|
||||
console.warn("WARN:SimpleChat:SC:RecentChat:iRecentUsermsgCnt of 0 means no user message/query sent");
|
||||
}
|
||||
/** @type{ChatMessages} */
|
||||
let rchat = [];
|
||||
let sysMsg = this.get_system_latest();
|
||||
if (sysMsg.length != 0) {
|
||||
rchat.push({role: Roles.System, content: sysMsg});
|
||||
}
|
||||
let iUserCnt = 0;
|
||||
let iStart = this.xchat.length;
|
||||
for(let i=this.xchat.length-1; i > this.iLastSys; i--) {
|
||||
if (iUserCnt >= iRecentUserMsgCnt) {
|
||||
break;
|
||||
}
|
||||
let msg = this.xchat[i];
|
||||
if (msg.role == Roles.User) {
|
||||
iStart = i;
|
||||
iUserCnt += 1;
|
||||
}
|
||||
}
|
||||
for(let i = iStart; i < this.xchat.length; i++) {
|
||||
let msg = this.xchat[i];
|
||||
if (msg.role == Roles.System) {
|
||||
continue;
|
||||
}
|
||||
rchat.push({role: msg.role, content: msg.content});
|
||||
}
|
||||
return rchat;
|
||||
}
|
||||
|
||||
/**
|
||||
* Collate the latest response from the server/ai-model, as it is becoming available.
|
||||
* This is mainly useful for the stream mode.
|
||||
* @param {string} content
|
||||
*/
|
||||
append_response(content) {
|
||||
this.latestResponse += content;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add an entry into xchat
|
||||
* @param {string} role
|
||||
* @param {string|undefined|null} content
|
||||
*/
|
||||
add(role, content) {
|
||||
if ((content == undefined) || (content == null) || (content == "")) {
|
||||
return false;
|
||||
}
|
||||
this.xchat.push( {role: role, content: content} );
|
||||
if (role == Roles.System) {
|
||||
this.iLastSys = this.xchat.length - 1;
|
||||
}
|
||||
this.save();
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Show the contents in the specified div
|
||||
* @param {HTMLDivElement} div
|
||||
* @param {boolean} bClear
|
||||
*/
|
||||
show(div, bClear=true) {
|
||||
if (bClear) {
|
||||
div.replaceChildren();
|
||||
}
|
||||
let last = undefined;
|
||||
for(const x of this.recent_chat(gMe.iRecentUserMsgCnt)) {
|
||||
let entry = ui.el_create_append_p(`${x.role}: ${x.content}`, div);
|
||||
entry.className = `role-${x.role}`;
|
||||
last = entry;
|
||||
}
|
||||
if (last !== undefined) {
|
||||
last.scrollIntoView(false);
|
||||
} else {
|
||||
if (bClear) {
|
||||
div.innerHTML = gUsageMsg;
|
||||
gMe.setup_load(div, this);
|
||||
gMe.show_info(div);
|
||||
}
|
||||
}
|
||||
return last;
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup the fetch headers.
|
||||
* It picks the headers from gMe.headers.
|
||||
* It inserts Authorization only if its non-empty.
|
||||
* @param {string} apiEP
|
||||
*/
|
||||
fetch_headers(apiEP) {
|
||||
let headers = new Headers();
|
||||
for(let k in gMe.headers) {
|
||||
let v = gMe.headers[k];
|
||||
if ((k == "Authorization") && (v.trim() == "")) {
|
||||
continue;
|
||||
}
|
||||
headers.append(k, v);
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add needed fields wrt json object to be sent wrt LLM web services completions endpoint.
|
||||
* The needed fields/options are picked from a global object.
|
||||
* Add optional stream flag, if required.
|
||||
* Convert the json into string.
|
||||
* @param {Object} obj
|
||||
*/
|
||||
request_jsonstr_extend(obj) {
|
||||
for(let k in gMe.apiRequestOptions) {
|
||||
obj[k] = gMe.apiRequestOptions[k];
|
||||
}
|
||||
if (gMe.bStream) {
|
||||
obj["stream"] = true;
|
||||
}
|
||||
return JSON.stringify(obj);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a string form of json object suitable for chat/completions
|
||||
*/
|
||||
request_messages_jsonstr() {
|
||||
let req = {
|
||||
messages: this.recent_chat(gMe.iRecentUserMsgCnt),
|
||||
}
|
||||
return this.request_jsonstr_extend(req);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a string form of json object suitable for /completions
|
||||
* @param {boolean} bInsertStandardRolePrefix Insert "<THE_ROLE>: " as prefix wrt each role's message
|
||||
*/
|
||||
request_prompt_jsonstr(bInsertStandardRolePrefix) {
|
||||
let prompt = "";
|
||||
let iCnt = 0;
|
||||
for(const chat of this.recent_chat(gMe.iRecentUserMsgCnt)) {
|
||||
iCnt += 1;
|
||||
if (iCnt > 1) {
|
||||
prompt += "\n";
|
||||
}
|
||||
if (bInsertStandardRolePrefix) {
|
||||
prompt += `${chat.role}: `;
|
||||
}
|
||||
prompt += `${chat.content}`;
|
||||
}
|
||||
let req = {
|
||||
prompt: prompt,
|
||||
}
|
||||
return this.request_jsonstr_extend(req);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a string form of json object suitable for specified api endpoint.
|
||||
* @param {string} apiEP
|
||||
*/
|
||||
request_jsonstr(apiEP) {
|
||||
if (apiEP == ApiEP.Type.Chat) {
|
||||
return this.request_messages_jsonstr();
|
||||
} else {
|
||||
return this.request_prompt_jsonstr(gMe.bCompletionInsertStandardRolePrefix);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the ai-model/assistant's response from the http response got.
|
||||
* Optionally trim the message wrt any garbage at the end.
|
||||
* @param {any} respBody
|
||||
* @param {string} apiEP
|
||||
*/
|
||||
response_extract(respBody, apiEP) {
|
||||
let assistant = "";
|
||||
if (apiEP == ApiEP.Type.Chat) {
|
||||
assistant = respBody["choices"][0]["message"]["content"];
|
||||
} else {
|
||||
try {
|
||||
assistant = respBody["choices"][0]["text"];
|
||||
} catch {
|
||||
assistant = respBody["content"];
|
||||
}
|
||||
}
|
||||
return assistant;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the ai-model/assistant's response from the http response got in streaming mode.
|
||||
* @param {any} respBody
|
||||
* @param {string} apiEP
|
||||
*/
|
||||
response_extract_stream(respBody, apiEP) {
|
||||
let assistant = "";
|
||||
if (apiEP == ApiEP.Type.Chat) {
|
||||
if (respBody["choices"][0]["finish_reason"] !== "stop") {
|
||||
assistant = respBody["choices"][0]["delta"]["content"];
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
assistant = respBody["choices"][0]["text"];
|
||||
} catch {
|
||||
assistant = respBody["content"];
|
||||
}
|
||||
}
|
||||
return assistant;
|
||||
}
|
||||
|
||||
/**
|
||||
* Allow setting of system prompt, but only at begining.
|
||||
* @param {string} sysPrompt
|
||||
* @param {string} msgTag
|
||||
*/
|
||||
add_system_begin(sysPrompt, msgTag) {
|
||||
if (this.xchat.length == 0) {
|
||||
if (sysPrompt.length > 0) {
|
||||
return this.add(Roles.System, sysPrompt);
|
||||
}
|
||||
} else {
|
||||
if (sysPrompt.length > 0) {
|
||||
if (this.xchat[0].role !== Roles.System) {
|
||||
console.error(`ERRR:SimpleChat:SC:${msgTag}:You need to specify system prompt before any user query, ignoring...`);
|
||||
} else {
|
||||
if (this.xchat[0].content !== sysPrompt) {
|
||||
console.error(`ERRR:SimpleChat:SC:${msgTag}:You cant change system prompt, mid way through, ignoring...`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Allow setting of system prompt, at any time.
|
||||
* @param {string} sysPrompt
|
||||
* @param {string} msgTag
|
||||
*/
|
||||
add_system_anytime(sysPrompt, msgTag) {
|
||||
if (sysPrompt.length <= 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (this.iLastSys < 0) {
|
||||
return this.add(Roles.System, sysPrompt);
|
||||
}
|
||||
|
||||
let lastSys = this.xchat[this.iLastSys].content;
|
||||
if (lastSys !== sysPrompt) {
|
||||
return this.add(Roles.System, sysPrompt);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the latest system prompt.
|
||||
*/
|
||||
get_system_latest() {
|
||||
if (this.iLastSys == -1) {
|
||||
return "";
|
||||
}
|
||||
let sysPrompt = this.xchat[this.iLastSys].content;
|
||||
return sysPrompt;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Handle the multipart response from server/ai-model
|
||||
* @param {Response} resp
|
||||
* @param {string} apiEP
|
||||
* @param {HTMLDivElement} elDiv
|
||||
*/
|
||||
async handle_response_multipart(resp, apiEP, elDiv) {
|
||||
let elP = ui.el_create_append_p("", elDiv);
|
||||
if (!resp.body) {
|
||||
throw Error("ERRR:SimpleChat:SC:HandleResponseMultiPart:No body...");
|
||||
}
|
||||
let tdUtf8 = new TextDecoder("utf-8");
|
||||
let rr = resp.body.getReader();
|
||||
this.latestResponse = "";
|
||||
let xLines = new du.NewLines();
|
||||
while(true) {
|
||||
let { value: cur, done: done } = await rr.read();
|
||||
if (cur) {
|
||||
let curBody = tdUtf8.decode(cur, {stream: true});
|
||||
console.debug("DBUG:SC:PART:Str:", curBody);
|
||||
xLines.add_append(curBody);
|
||||
}
|
||||
while(true) {
|
||||
let curLine = xLines.shift(!done);
|
||||
if (curLine == undefined) {
|
||||
break;
|
||||
}
|
||||
if (curLine.trim() == "") {
|
||||
continue;
|
||||
}
|
||||
if (curLine.startsWith("data:")) {
|
||||
curLine = curLine.substring(5);
|
||||
}
|
||||
if (curLine.trim() === "[DONE]") {
|
||||
break;
|
||||
}
|
||||
let curJson = JSON.parse(curLine);
|
||||
console.debug("DBUG:SC:PART:Json:", curJson);
|
||||
this.append_response(this.response_extract_stream(curJson, apiEP));
|
||||
}
|
||||
elP.innerText = this.latestResponse;
|
||||
elP.scrollIntoView(false);
|
||||
if (done) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
console.debug("DBUG:SC:PART:Full:", this.latestResponse);
|
||||
return this.latestResponse;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the oneshot response from server/ai-model
|
||||
* @param {Response} resp
|
||||
* @param {string} apiEP
|
||||
*/
|
||||
async handle_response_oneshot(resp, apiEP) {
|
||||
let respBody = await resp.json();
|
||||
console.debug(`DBUG:SimpleChat:SC:${this.chatId}:HandleUserSubmit:RespBody:${JSON.stringify(respBody)}`);
|
||||
return this.response_extract(respBody, apiEP);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the response from the server be it in oneshot or multipart/stream mode.
|
||||
* Also take care of the optional garbage trimming.
|
||||
* @param {Response} resp
|
||||
* @param {string} apiEP
|
||||
* @param {HTMLDivElement} elDiv
|
||||
*/
|
||||
async handle_response(resp, apiEP, elDiv) {
|
||||
let theResp = {
|
||||
assistant: "",
|
||||
trimmed: "",
|
||||
}
|
||||
if (gMe.bStream) {
|
||||
try {
|
||||
theResp.assistant = await this.handle_response_multipart(resp, apiEP, elDiv);
|
||||
this.latestResponse = "";
|
||||
} catch (error) {
|
||||
theResp.assistant = this.latestResponse;
|
||||
this.add(Roles.Assistant, theResp.assistant);
|
||||
this.latestResponse = "";
|
||||
throw error;
|
||||
}
|
||||
} else {
|
||||
theResp.assistant = await this.handle_response_oneshot(resp, apiEP);
|
||||
}
|
||||
if (gMe.bTrimGarbage) {
|
||||
let origMsg = theResp.assistant;
|
||||
theResp.assistant = du.trim_garbage_at_end(origMsg);
|
||||
theResp.trimmed = origMsg.substring(theResp.assistant.length);
|
||||
}
|
||||
this.add(Roles.Assistant, theResp.assistant);
|
||||
return theResp;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
class MultiChatUI {
|
||||
|
||||
constructor() {
|
||||
/** @type {Object<string, SimpleChat>} */
|
||||
this.simpleChats = {};
|
||||
/** @type {string} */
|
||||
this.curChatId = "";
|
||||
|
||||
// the ui elements
|
||||
this.elInSystem = /** @type{HTMLInputElement} */(document.getElementById("system-in"));
|
||||
this.elDivChat = /** @type{HTMLDivElement} */(document.getElementById("chat-div"));
|
||||
this.elBtnUser = /** @type{HTMLButtonElement} */(document.getElementById("user-btn"));
|
||||
this.elInUser = /** @type{HTMLInputElement} */(document.getElementById("user-in"));
|
||||
this.elDivHeading = /** @type{HTMLSelectElement} */(document.getElementById("heading"));
|
||||
this.elDivSessions = /** @type{HTMLDivElement} */(document.getElementById("sessions-div"));
|
||||
this.elBtnSettings = /** @type{HTMLButtonElement} */(document.getElementById("settings"));
|
||||
|
||||
this.validate_element(this.elInSystem, "system-in");
|
||||
this.validate_element(this.elDivChat, "chat-div");
|
||||
this.validate_element(this.elInUser, "user-in");
|
||||
this.validate_element(this.elDivHeading, "heading");
|
||||
this.validate_element(this.elDivChat, "sessions-div");
|
||||
this.validate_element(this.elBtnSettings, "settings");
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the element got
|
||||
* @param {HTMLElement | null} el
|
||||
* @param {string} msgTag
|
||||
*/
|
||||
validate_element(el, msgTag) {
|
||||
if (el == null) {
|
||||
throw Error(`ERRR:SimpleChat:MCUI:${msgTag} element missing in html...`);
|
||||
} else {
|
||||
console.debug(`INFO:SimpleChat:MCUI:${msgTag} Id[${el.id}] Name[${el["name"]}]`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset user input ui.
|
||||
* * clear user input
|
||||
* * enable user input
|
||||
* * set focus to user input
|
||||
*/
|
||||
ui_reset_userinput() {
|
||||
this.elInUser.value = "";
|
||||
this.elInUser.disabled = false;
|
||||
this.elInUser.focus();
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup the needed callbacks wrt UI, curChatId to defaultChatId and
|
||||
* optionally switch to specified defaultChatId.
|
||||
* @param {string} defaultChatId
|
||||
* @param {boolean} bSwitchSession
|
||||
*/
|
||||
setup_ui(defaultChatId, bSwitchSession=false) {
|
||||
|
||||
this.curChatId = defaultChatId;
|
||||
if (bSwitchSession) {
|
||||
this.handle_session_switch(this.curChatId);
|
||||
}
|
||||
|
||||
this.elBtnSettings.addEventListener("click", (ev)=>{
|
||||
this.elDivChat.replaceChildren();
|
||||
gMe.show_settings(this.elDivChat);
|
||||
});
|
||||
|
||||
this.elBtnUser.addEventListener("click", (ev)=>{
|
||||
if (this.elInUser.disabled) {
|
||||
return;
|
||||
}
|
||||
this.handle_user_submit(this.curChatId, gMe.apiEP).catch((/** @type{Error} */reason)=>{
|
||||
let msg = `ERRR:SimpleChat\nMCUI:HandleUserSubmit:${this.curChatId}\n${reason.name}:${reason.message}`;
|
||||
console.error(msg.replace("\n", ":"));
|
||||
alert(msg);
|
||||
this.ui_reset_userinput();
|
||||
});
|
||||
});
|
||||
|
||||
this.elInUser.addEventListener("keyup", (ev)=> {
|
||||
// allow user to insert enter into their message using shift+enter.
|
||||
// while just pressing enter key will lead to submitting.
|
||||
if ((ev.key === "Enter") && (!ev.shiftKey)) {
|
||||
let value = this.elInUser.value;
|
||||
this.elInUser.value = value.substring(0,value.length-1);
|
||||
this.elBtnUser.click();
|
||||
ev.preventDefault();
|
||||
}
|
||||
});
|
||||
|
||||
this.elInSystem.addEventListener("keyup", (ev)=> {
|
||||
// allow user to insert enter into the system prompt using shift+enter.
|
||||
// while just pressing enter key will lead to setting the system prompt.
|
||||
if ((ev.key === "Enter") && (!ev.shiftKey)) {
|
||||
let value = this.elInSystem.value;
|
||||
this.elInSystem.value = value.substring(0,value.length-1);
|
||||
let chat = this.simpleChats[this.curChatId];
|
||||
chat.add_system_anytime(this.elInSystem.value, this.curChatId);
|
||||
chat.show(this.elDivChat);
|
||||
ev.preventDefault();
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup a new chat session and optionally switch to it.
|
||||
* @param {string} chatId
|
||||
* @param {boolean} bSwitchSession
|
||||
*/
|
||||
new_chat_session(chatId, bSwitchSession=false) {
|
||||
this.simpleChats[chatId] = new SimpleChat(chatId);
|
||||
if (bSwitchSession) {
|
||||
this.handle_session_switch(chatId);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Handle user query submit request, wrt specified chat session.
|
||||
* @param {string} chatId
|
||||
* @param {string} apiEP
|
||||
*/
|
||||
async handle_user_submit(chatId, apiEP) {
|
||||
|
||||
let chat = this.simpleChats[chatId];
|
||||
|
||||
// In completion mode, if configured, clear any previous chat history.
|
||||
// So if user wants to simulate a multi-chat based completion query,
|
||||
// they will have to enter the full thing, as a suitable multiline
|
||||
// user input/query.
|
||||
if ((apiEP == ApiEP.Type.Completion) && (gMe.bCompletionFreshChatAlways)) {
|
||||
chat.clear();
|
||||
}
|
||||
|
||||
chat.add_system_anytime(this.elInSystem.value, chatId);
|
||||
|
||||
let content = this.elInUser.value;
|
||||
if (!chat.add(Roles.User, content)) {
|
||||
console.debug(`WARN:SimpleChat:MCUI:${chatId}:HandleUserSubmit:Ignoring empty user input...`);
|
||||
return;
|
||||
}
|
||||
chat.show(this.elDivChat);
|
||||
|
||||
let theUrl = ApiEP.Url(gMe.baseURL, apiEP);
|
||||
let theBody = chat.request_jsonstr(apiEP);
|
||||
|
||||
this.elInUser.value = "working...";
|
||||
this.elInUser.disabled = true;
|
||||
console.debug(`DBUG:SimpleChat:MCUI:${chatId}:HandleUserSubmit:${theUrl}:ReqBody:${theBody}`);
|
||||
let theHeaders = chat.fetch_headers(apiEP);
|
||||
let resp = await fetch(theUrl, {
|
||||
method: "POST",
|
||||
headers: theHeaders,
|
||||
body: theBody,
|
||||
});
|
||||
|
||||
let theResp = await chat.handle_response(resp, apiEP, this.elDivChat);
|
||||
if (chatId == this.curChatId) {
|
||||
chat.show(this.elDivChat);
|
||||
if (theResp.trimmed.length > 0) {
|
||||
let p = ui.el_create_append_p(`TRIMMED:${theResp.trimmed}`, this.elDivChat);
|
||||
p.className="role-trim";
|
||||
}
|
||||
} else {
|
||||
console.debug(`DBUG:SimpleChat:MCUI:HandleUserSubmit:ChatId has changed:[${chatId}] [${this.curChatId}]`);
|
||||
}
|
||||
this.ui_reset_userinput();
|
||||
}
|
||||
|
||||
/**
|
||||
* Show buttons for NewChat and available chat sessions, in the passed elDiv.
|
||||
* If elDiv is undefined/null, then use this.elDivSessions.
|
||||
* Take care of highlighting the selected chat-session's btn.
|
||||
* @param {HTMLDivElement | undefined} elDiv
|
||||
*/
|
||||
show_sessions(elDiv=undefined) {
|
||||
if (!elDiv) {
|
||||
elDiv = this.elDivSessions;
|
||||
}
|
||||
elDiv.replaceChildren();
|
||||
// Btn for creating new chat session
|
||||
let btnNew = ui.el_create_button("New CHAT", (ev)=> {
|
||||
if (this.elInUser.disabled) {
|
||||
console.error(`ERRR:SimpleChat:MCUI:NewChat:Current session [${this.curChatId}] awaiting response, ignoring request...`);
|
||||
alert("ERRR:SimpleChat\nMCUI:NewChat\nWait for response to pending query, before starting new chat session");
|
||||
return;
|
||||
}
|
||||
let chatId = `Chat${Object.keys(this.simpleChats).length}`;
|
||||
let chatIdGot = prompt("INFO:SimpleChat\nMCUI:NewChat\nEnter id for new chat session", chatId);
|
||||
if (!chatIdGot) {
|
||||
console.error("ERRR:SimpleChat:MCUI:NewChat:Skipping based on user request...");
|
||||
return;
|
||||
}
|
||||
this.new_chat_session(chatIdGot, true);
|
||||
this.create_session_btn(elDiv, chatIdGot);
|
||||
ui.el_children_config_class(elDiv, chatIdGot, "session-selected", "");
|
||||
});
|
||||
elDiv.appendChild(btnNew);
|
||||
// Btns for existing chat sessions
|
||||
let chatIds = Object.keys(this.simpleChats);
|
||||
for(let cid of chatIds) {
|
||||
let btn = this.create_session_btn(elDiv, cid);
|
||||
if (cid == this.curChatId) {
|
||||
btn.className = "session-selected";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
create_session_btn(elDiv, cid) {
|
||||
let btn = ui.el_create_button(cid, (ev)=>{
|
||||
let target = /** @type{HTMLButtonElement} */(ev.target);
|
||||
console.debug(`DBUG:SimpleChat:MCUI:SessionClick:${target.id}`);
|
||||
if (this.elInUser.disabled) {
|
||||
console.error(`ERRR:SimpleChat:MCUI:SessionClick:${target.id}:Current session [${this.curChatId}] awaiting response, ignoring switch...`);
|
||||
alert("ERRR:SimpleChat\nMCUI:SessionClick\nWait for response to pending query, before switching");
|
||||
return;
|
||||
}
|
||||
this.handle_session_switch(target.id);
|
||||
ui.el_children_config_class(elDiv, target.id, "session-selected", "");
|
||||
});
|
||||
elDiv.appendChild(btn);
|
||||
return btn;
|
||||
}
|
||||
|
||||
/**
|
||||
* Switch ui to the specified chatId and set curChatId to same.
|
||||
* @param {string} chatId
|
||||
*/
|
||||
async handle_session_switch(chatId) {
|
||||
let chat = this.simpleChats[chatId];
|
||||
if (chat == undefined) {
|
||||
console.error(`ERRR:SimpleChat:MCUI:HandleSessionSwitch:${chatId} missing...`);
|
||||
return;
|
||||
}
|
||||
this.elInSystem.value = chat.get_system_latest();
|
||||
this.elInUser.value = "";
|
||||
chat.show(this.elDivChat);
|
||||
this.elInUser.focus();
|
||||
this.curChatId = chatId;
|
||||
console.log(`INFO:SimpleChat:MCUI:HandleSessionSwitch:${chatId} entered...`);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
class Me {
|
||||
|
||||
constructor() {
|
||||
this.baseURL = "http://127.0.0.1:8080";
|
||||
this.defaultChatIds = [ "Default", "Other" ];
|
||||
this.multiChat = new MultiChatUI();
|
||||
this.bStream = true;
|
||||
this.bCompletionFreshChatAlways = true;
|
||||
this.bCompletionInsertStandardRolePrefix = false;
|
||||
this.bTrimGarbage = true;
|
||||
this.iRecentUserMsgCnt = 2;
|
||||
this.sRecentUserMsgCnt = {
|
||||
"Full": -1,
|
||||
"Last0": 1,
|
||||
"Last1": 2,
|
||||
"Last2": 3,
|
||||
"Last4": 5,
|
||||
};
|
||||
this.apiEP = ApiEP.Type.Chat;
|
||||
this.headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": "", // Authorization: Bearer OPENAI_API_KEY
|
||||
}
|
||||
// Add needed fields wrt json object to be sent wrt LLM web services completions endpoint.
|
||||
this.apiRequestOptions = {
|
||||
"model": "gpt-3.5-turbo",
|
||||
"temperature": 0.7,
|
||||
"max_tokens": 1024,
|
||||
"n_predict": 1024,
|
||||
"cache_prompt": false,
|
||||
//"frequency_penalty": 1.2,
|
||||
//"presence_penalty": 1.2,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Disable console.debug by mapping it to a empty function.
|
||||
*/
|
||||
debug_disable() {
|
||||
this.console_debug = console.debug;
|
||||
console.debug = () => {
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup the load saved chat ui.
|
||||
* @param {HTMLDivElement} div
|
||||
* @param {SimpleChat} chat
|
||||
*/
|
||||
setup_load(div, chat) {
|
||||
if (!(chat.ods_key() in localStorage)) {
|
||||
return;
|
||||
}
|
||||
div.innerHTML += `<p class="role-system">Restore</p>
|
||||
<p>Load previously saved chat session, if available</p>`;
|
||||
let btn = ui.el_create_button(chat.ods_key(), (ev)=>{
|
||||
console.log("DBUG:SimpleChat:SC:Load", chat);
|
||||
chat.load();
|
||||
queueMicrotask(()=>{
|
||||
chat.show(div);
|
||||
this.multiChat.elInSystem.value = chat.get_system_latest();
|
||||
});
|
||||
});
|
||||
div.appendChild(btn);
|
||||
}
|
||||
|
||||
/**
|
||||
* Show the configurable parameters info in the passed Div element.
|
||||
* @param {HTMLDivElement} elDiv
|
||||
* @param {boolean} bAll
|
||||
*/
|
||||
show_info(elDiv, bAll=false) {
|
||||
|
||||
let p = ui.el_create_append_p("Settings (devel-tools-console document[gMe])", elDiv);
|
||||
p.className = "role-system";
|
||||
|
||||
if (bAll) {
|
||||
|
||||
ui.el_create_append_p(`baseURL:${this.baseURL}`, elDiv);
|
||||
|
||||
ui.el_create_append_p(`Authorization:${this.headers["Authorization"]}`, elDiv);
|
||||
|
||||
ui.el_create_append_p(`bStream:${this.bStream}`, elDiv);
|
||||
|
||||
ui.el_create_append_p(`bTrimGarbage:${this.bTrimGarbage}`, elDiv);
|
||||
|
||||
ui.el_create_append_p(`ApiEndPoint:${this.apiEP}`, elDiv);
|
||||
|
||||
ui.el_create_append_p(`iRecentUserMsgCnt:${this.iRecentUserMsgCnt}`, elDiv);
|
||||
|
||||
ui.el_create_append_p(`bCompletionFreshChatAlways:${this.bCompletionFreshChatAlways}`, elDiv);
|
||||
|
||||
ui.el_create_append_p(`bCompletionInsertStandardRolePrefix:${this.bCompletionInsertStandardRolePrefix}`, elDiv);
|
||||
|
||||
}
|
||||
|
||||
ui.el_create_append_p(`apiRequestOptions:${JSON.stringify(this.apiRequestOptions, null, " - ")}`, elDiv);
|
||||
ui.el_create_append_p(`headers:${JSON.stringify(this.headers, null, " - ")}`, elDiv);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Auto create ui input elements for fields in apiRequestOptions
|
||||
* Currently supports text and number field types.
|
||||
* @param {HTMLDivElement} elDiv
|
||||
*/
|
||||
show_settings_apirequestoptions(elDiv) {
|
||||
let typeDict = {
|
||||
"string": "text",
|
||||
"number": "number",
|
||||
};
|
||||
let fs = document.createElement("fieldset");
|
||||
let legend = document.createElement("legend");
|
||||
legend.innerText = "ApiRequestOptions";
|
||||
fs.appendChild(legend);
|
||||
elDiv.appendChild(fs);
|
||||
for(const k in this.apiRequestOptions) {
|
||||
let val = this.apiRequestOptions[k];
|
||||
let type = typeof(val);
|
||||
if (((type == "string") || (type == "number"))) {
|
||||
let inp = ui.el_creatediv_input(`Set${k}`, k, typeDict[type], this.apiRequestOptions[k], (val)=>{
|
||||
if (type == "number") {
|
||||
val = Number(val);
|
||||
}
|
||||
this.apiRequestOptions[k] = val;
|
||||
});
|
||||
fs.appendChild(inp.div);
|
||||
} else if (type == "boolean") {
|
||||
let bbtn = ui.el_creatediv_boolbutton(`Set{k}`, k, {true: "true", false: "false"}, val, (userVal)=>{
|
||||
this.apiRequestOptions[k] = userVal;
|
||||
});
|
||||
fs.appendChild(bbtn.div);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Show settings ui for configurable parameters, in the passed Div element.
|
||||
* @param {HTMLDivElement} elDiv
|
||||
*/
|
||||
show_settings(elDiv) {
|
||||
|
||||
let inp = ui.el_creatediv_input("SetBaseURL", "BaseURL", "text", this.baseURL, (val)=>{
|
||||
this.baseURL = val;
|
||||
});
|
||||
elDiv.appendChild(inp.div);
|
||||
|
||||
inp = ui.el_creatediv_input("SetAuthorization", "Authorization", "text", this.headers["Authorization"], (val)=>{
|
||||
this.headers["Authorization"] = val;
|
||||
});
|
||||
inp.el.placeholder = "Bearer OPENAI_API_KEY";
|
||||
elDiv.appendChild(inp.div);
|
||||
|
||||
let bb = ui.el_creatediv_boolbutton("SetStream", "Stream", {true: "[+] yes stream", false: "[-] do oneshot"}, this.bStream, (val)=>{
|
||||
this.bStream = val;
|
||||
});
|
||||
elDiv.appendChild(bb.div);
|
||||
|
||||
bb = ui.el_creatediv_boolbutton("SetTrimGarbage", "TrimGarbage", {true: "[+] yes trim", false: "[-] dont trim"}, this.bTrimGarbage, (val)=>{
|
||||
this.bTrimGarbage = val;
|
||||
});
|
||||
elDiv.appendChild(bb.div);
|
||||
|
||||
this.show_settings_apirequestoptions(elDiv);
|
||||
|
||||
let sel = ui.el_creatediv_select("SetApiEP", "ApiEndPoint", ApiEP.Type, this.apiEP, (val)=>{
|
||||
this.apiEP = ApiEP.Type[val];
|
||||
});
|
||||
elDiv.appendChild(sel.div);
|
||||
|
||||
sel = ui.el_creatediv_select("SetChatHistoryInCtxt", "ChatHistoryInCtxt", this.sRecentUserMsgCnt, this.iRecentUserMsgCnt, (val)=>{
|
||||
this.iRecentUserMsgCnt = this.sRecentUserMsgCnt[val];
|
||||
});
|
||||
elDiv.appendChild(sel.div);
|
||||
|
||||
bb = ui.el_creatediv_boolbutton("SetCompletionFreshChatAlways", "CompletionFreshChatAlways", {true: "[+] yes fresh", false: "[-] no, with history"}, this.bCompletionFreshChatAlways, (val)=>{
|
||||
this.bCompletionFreshChatAlways = val;
|
||||
});
|
||||
elDiv.appendChild(bb.div);
|
||||
|
||||
bb = ui.el_creatediv_boolbutton("SetCompletionInsertStandardRolePrefix", "CompletionInsertStandardRolePrefix", {true: "[+] yes insert", false: "[-] dont insert"}, this.bCompletionInsertStandardRolePrefix, (val)=>{
|
||||
this.bCompletionInsertStandardRolePrefix = val;
|
||||
});
|
||||
elDiv.appendChild(bb.div);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
/** @type {Me} */
|
||||
let gMe;
|
||||
|
||||
function startme() {
|
||||
console.log("INFO:SimpleChat:StartMe:Starting...");
|
||||
gMe = new Me();
|
||||
gMe.debug_disable();
|
||||
document["gMe"] = gMe;
|
||||
document["du"] = du;
|
||||
for (let cid of gMe.defaultChatIds) {
|
||||
gMe.multiChat.new_chat_session(cid);
|
||||
}
|
||||
gMe.multiChat.setup_ui(gMe.defaultChatIds[0], true);
|
||||
gMe.multiChat.show_sessions();
|
||||
}
|
||||
|
||||
document.addEventListener("DOMContentLoaded", startme);
|
||||
BIN
tools/server/public_simplechat/simplechat_screens.webp
Normal file
|
After Width: | Height: | Size: 21 KiB |
211
tools/server/public_simplechat/ui.mjs
Normal file
@@ -0,0 +1,211 @@
|
||||
//@ts-check
|
||||
// Helpers to work with html elements
|
||||
// by Humans for All
|
||||
//
|
||||
|
||||
|
||||
/**
|
||||
* Set the class of the children, based on whether it is the idSelected or not.
|
||||
* @param {HTMLDivElement} elBase
|
||||
* @param {string} idSelected
|
||||
* @param {string} classSelected
|
||||
* @param {string} classUnSelected
|
||||
*/
|
||||
export function el_children_config_class(elBase, idSelected, classSelected, classUnSelected="") {
|
||||
for(let child of elBase.children) {
|
||||
if (child.id == idSelected) {
|
||||
child.className = classSelected;
|
||||
} else {
|
||||
child.className = classUnSelected;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create button and set it up.
|
||||
* @param {string} id
|
||||
* @param {(this: HTMLButtonElement, ev: MouseEvent) => any} callback
|
||||
* @param {string | undefined} name
|
||||
* @param {string | undefined} innerText
|
||||
*/
|
||||
export function el_create_button(id, callback, name=undefined, innerText=undefined) {
|
||||
if (!name) {
|
||||
name = id;
|
||||
}
|
||||
if (!innerText) {
|
||||
innerText = id;
|
||||
}
|
||||
let btn = document.createElement("button");
|
||||
btn.id = id;
|
||||
btn.name = name;
|
||||
btn.innerText = innerText;
|
||||
btn.addEventListener("click", callback);
|
||||
return btn;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a para and set it up. Optionaly append it to a passed parent.
|
||||
* @param {string} text
|
||||
* @param {HTMLElement | undefined} elParent
|
||||
* @param {string | undefined} id
|
||||
*/
|
||||
export function el_create_append_p(text, elParent=undefined, id=undefined) {
|
||||
let para = document.createElement("p");
|
||||
para.innerText = text;
|
||||
if (id) {
|
||||
para.id = id;
|
||||
}
|
||||
if (elParent) {
|
||||
elParent.appendChild(para);
|
||||
}
|
||||
return para;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a button which represents bool value using specified text wrt true and false.
|
||||
* When ever user clicks the button, it will toggle the value and update the shown text.
|
||||
*
|
||||
* @param {string} id
|
||||
* @param {{true: string, false: string}} texts
|
||||
* @param {boolean} defaultValue
|
||||
* @param {function(boolean):void} cb
|
||||
*/
|
||||
export function el_create_boolbutton(id, texts, defaultValue, cb) {
|
||||
let el = document.createElement("button");
|
||||
el["xbool"] = defaultValue;
|
||||
el["xtexts"] = structuredClone(texts);
|
||||
el.innerText = el["xtexts"][String(defaultValue)];
|
||||
if (id) {
|
||||
el.id = id;
|
||||
}
|
||||
el.addEventListener('click', (ev)=>{
|
||||
el["xbool"] = !el["xbool"];
|
||||
el.innerText = el["xtexts"][String(el["xbool"])];
|
||||
cb(el["xbool"]);
|
||||
})
|
||||
return el;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a div wrapped button which represents bool value using specified text wrt true and false.
|
||||
* @param {string} id
|
||||
* @param {string} label
|
||||
* @param {{ true: string; false: string; }} texts
|
||||
* @param {boolean} defaultValue
|
||||
* @param {(arg0: boolean) => void} cb
|
||||
* @param {string} className
|
||||
*/
|
||||
export function el_creatediv_boolbutton(id, label, texts, defaultValue, cb, className="gridx2") {
|
||||
let div = document.createElement("div");
|
||||
div.className = className;
|
||||
let lbl = document.createElement("label");
|
||||
lbl.setAttribute("for", id);
|
||||
lbl.innerText = label;
|
||||
div.appendChild(lbl);
|
||||
let btn = el_create_boolbutton(id, texts, defaultValue, cb);
|
||||
div.appendChild(btn);
|
||||
return { div: div, el: btn };
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Create a select ui element, with a set of options to select from.
|
||||
* * options: an object which contains name-value pairs
|
||||
* * defaultOption: the value whose name should be choosen, by default.
|
||||
* * cb : the call back returns the name string of the option selected.
|
||||
*
|
||||
* @param {string} id
|
||||
* @param {Object<string,*>} options
|
||||
* @param {*} defaultOption
|
||||
* @param {function(string):void} cb
|
||||
*/
|
||||
export function el_create_select(id, options, defaultOption, cb) {
|
||||
let el = document.createElement("select");
|
||||
el["xselected"] = defaultOption;
|
||||
el["xoptions"] = structuredClone(options);
|
||||
for(let cur of Object.keys(options)) {
|
||||
let op = document.createElement("option");
|
||||
op.value = cur;
|
||||
op.innerText = cur;
|
||||
if (options[cur] == defaultOption) {
|
||||
op.selected = true;
|
||||
}
|
||||
el.appendChild(op);
|
||||
}
|
||||
if (id) {
|
||||
el.id = id;
|
||||
el.name = id;
|
||||
}
|
||||
el.addEventListener('change', (ev)=>{
|
||||
let target = /** @type{HTMLSelectElement} */(ev.target);
|
||||
console.log("DBUG:UI:Select:", id, ":", target.value);
|
||||
cb(target.value);
|
||||
})
|
||||
return el;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a div wrapped select ui element, with a set of options to select from.
|
||||
*
|
||||
* @param {string} id
|
||||
* @param {any} label
|
||||
* @param {{ [x: string]: any; }} options
|
||||
* @param {any} defaultOption
|
||||
* @param {(arg0: string) => void} cb
|
||||
* @param {string} className
|
||||
*/
|
||||
export function el_creatediv_select(id, label, options, defaultOption, cb, className="gridx2") {
|
||||
let div = document.createElement("div");
|
||||
div.className = className;
|
||||
let lbl = document.createElement("label");
|
||||
lbl.setAttribute("for", id);
|
||||
lbl.innerText = label;
|
||||
div.appendChild(lbl);
|
||||
let sel = el_create_select(id, options,defaultOption, cb);
|
||||
div.appendChild(sel);
|
||||
return { div: div, el: sel };
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Create a input ui element.
|
||||
*
|
||||
* @param {string} id
|
||||
* @param {string} type
|
||||
* @param {any} defaultValue
|
||||
* @param {function(any):void} cb
|
||||
*/
|
||||
export function el_create_input(id, type, defaultValue, cb) {
|
||||
let el = document.createElement("input");
|
||||
el.type = type;
|
||||
el.value = defaultValue;
|
||||
if (id) {
|
||||
el.id = id;
|
||||
}
|
||||
el.addEventListener('change', (ev)=>{
|
||||
cb(el.value);
|
||||
})
|
||||
return el;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a div wrapped input.
|
||||
*
|
||||
* @param {string} id
|
||||
* @param {string} label
|
||||
* @param {string} type
|
||||
* @param {any} defaultValue
|
||||
* @param {function(any):void} cb
|
||||
* @param {string} className
|
||||
*/
|
||||
export function el_creatediv_input(id, label, type, defaultValue, cb, className="gridx2") {
|
||||
let div = document.createElement("div");
|
||||
div.className = className;
|
||||
let lbl = document.createElement("label");
|
||||
lbl.setAttribute("for", id);
|
||||
lbl.innerText = label;
|
||||
div.appendChild(lbl);
|
||||
let el = el_create_input(id, type, defaultValue, cb);
|
||||
div.appendChild(el);
|
||||
return { div: div, el: el };
|
||||
}
|
||||
1686
tools/server/server-common.cpp
Normal file
362
tools/server/server-common.h
Normal file
@@ -0,0 +1,362 @@
|
||||
#pragma once
|
||||
|
||||
#include "common.h"
|
||||
#include "log.h"
|
||||
#include "llama.h"
|
||||
#include "chat.h"
|
||||
#include "mtmd.h"
|
||||
|
||||
#define JSON_ASSERT GGML_ASSERT
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <cinttypes>
|
||||
|
||||
const static std::string build_info("b" + std::to_string(LLAMA_BUILD_NUMBER) + "-" + LLAMA_COMMIT);
|
||||
|
||||
using json = nlohmann::ordered_json;
|
||||
|
||||
#define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__)
|
||||
#define SLT_CNT(slot, fmt, ...) LOG_CNT("" fmt, __VA_ARGS__)
|
||||
#define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__)
|
||||
#define SLT_ERR(slot, fmt, ...) LOG_ERR("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__)
|
||||
#define SLT_DBG(slot, fmt, ...) LOG_DBG("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__)
|
||||
|
||||
#define SRV_INF(fmt, ...) LOG_INF("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define SRV_CNT(fmt, ...) LOG_CNT("" fmt, __VA_ARGS__)
|
||||
#define SRV_WRN(fmt, ...) LOG_WRN("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define SRV_ERR(fmt, ...) LOG_ERR("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define SRV_DBG(fmt, ...) LOG_DBG("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
|
||||
using raw_buffer = std::vector<uint8_t>;
|
||||
|
||||
template <typename T>
|
||||
static T json_value(const json & body, const std::string & key, const T & default_value) {
|
||||
// Fallback null to default value
|
||||
if (body.contains(key) && !body.at(key).is_null()) {
|
||||
try {
|
||||
return body.at(key);
|
||||
} catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const & err) {
|
||||
LOG_WRN("Wrong type supplied for parameter '%s'. Expected '%s', using default value: %s\n", key.c_str(), json(default_value).type_name(), err.what());
|
||||
return default_value;
|
||||
}
|
||||
} else {
|
||||
return default_value;
|
||||
}
|
||||
}
|
||||
|
||||
// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11
|
||||
enum error_type {
|
||||
ERROR_TYPE_INVALID_REQUEST,
|
||||
ERROR_TYPE_AUTHENTICATION,
|
||||
ERROR_TYPE_SERVER,
|
||||
ERROR_TYPE_NOT_FOUND,
|
||||
ERROR_TYPE_PERMISSION,
|
||||
ERROR_TYPE_UNAVAILABLE, // custom error
|
||||
ERROR_TYPE_NOT_SUPPORTED, // custom error
|
||||
ERROR_TYPE_EXCEED_CONTEXT_SIZE, // custom error
|
||||
};
|
||||
|
||||
// thin wrapper around common_grammar_trigger with (de)serialization functions
|
||||
struct server_grammar_trigger {
|
||||
common_grammar_trigger value;
|
||||
|
||||
server_grammar_trigger() = default;
|
||||
server_grammar_trigger(const common_grammar_trigger & value) : value(value) {}
|
||||
server_grammar_trigger(const json & in) {
|
||||
value.type = (common_grammar_trigger_type) in.at("type").get<int>();
|
||||
value.value = in.at("value").get<std::string>();
|
||||
if (value.type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) {
|
||||
value.token = (llama_token) in.at("token").get<int>();
|
||||
}
|
||||
}
|
||||
|
||||
json to_json() const {
|
||||
json out {
|
||||
{"type", (int) value.type},
|
||||
{"value", value.value},
|
||||
};
|
||||
if (value.type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) {
|
||||
out["token"] = (int) value.token;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
};
|
||||
|
||||
json format_error_response(const std::string & message, const enum error_type type);
|
||||
|
||||
//
|
||||
// random string / id
|
||||
//
|
||||
|
||||
std::string random_string();
|
||||
std::string gen_chatcmplid();
|
||||
std::string gen_tool_call_id();
|
||||
|
||||
//
|
||||
// lora utils
|
||||
//
|
||||
|
||||
// check whether the given lora set has only aloras activated (empty => false)
|
||||
bool lora_all_alora(const std::vector<common_adapter_lora_info> & loras);
|
||||
|
||||
// if the two sets of loras are different, they require a cache clear unless the
|
||||
// change is only from aloras to aloras.
|
||||
bool lora_should_clear_cache(
|
||||
const std::vector<common_adapter_lora_info> & current,
|
||||
const std::vector<common_adapter_lora_info> & next);
|
||||
|
||||
std::map<int, float> parse_lora_request(const json & data);
|
||||
|
||||
bool are_lora_equal(
|
||||
const std::vector<common_adapter_lora_info> & l1,
|
||||
const std::vector<common_adapter_lora_info> & l2);
|
||||
|
||||
// get the ids of all enabled loras
|
||||
std::vector<size_t> lora_get_enabled_ids(const std::vector<common_adapter_lora_info> & loras);
|
||||
|
||||
//
|
||||
// server_tokens
|
||||
//
|
||||
|
||||
/**
|
||||
* server_tokens is a helper to manage the input tokens and image for the server.
|
||||
* it is made this way to simplify the logic of KV cache management.
|
||||
*/
|
||||
struct server_tokens {
|
||||
bool has_mtmd = false;
|
||||
|
||||
private: // disallow accessing these members directly, risking out-of-sync
|
||||
|
||||
// map a **start** index in tokens to the image chunk
|
||||
// note: the order need to be in-sync with tokens
|
||||
std::map<size_t, mtmd::input_chunk_ptr> map_idx_to_media;
|
||||
|
||||
// list of tokens
|
||||
// if the token is LLAMA_TOKEN_NULL, it indicates that this position is occupied by media chunk
|
||||
// otherwise, it is a normal text token
|
||||
// note: a non-text chunk can occupy multiple tokens (aka memory cells) in the token list
|
||||
// note(2): for M-RoPE, an image can occupy different number of pos; do not assume 1-to-1 mapping tokens <-> pos
|
||||
llama_tokens tokens;
|
||||
|
||||
// for ex. with input of 5 text tokens and 2 images (each image occupies 3 tokens and 2 pos):
|
||||
// [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1] [img1]
|
||||
// idx 0 1 2 3 4 5 6 7 8 9 10
|
||||
// pos 0 1 2 3 4 5 5 5 7 7 7
|
||||
// map_idx_to_media will contain: {5, img0}, {8, img1}
|
||||
|
||||
public:
|
||||
server_tokens() = default;
|
||||
~server_tokens() = default;
|
||||
|
||||
// Prevent copying
|
||||
// TODO: server_tokens should be copyable - remove this:
|
||||
server_tokens(const server_tokens&) = delete;
|
||||
server_tokens& operator=(const server_tokens&) = delete;
|
||||
|
||||
// Allow moving (usually implicitly generated if members are movable)
|
||||
server_tokens(server_tokens&&) = default;
|
||||
server_tokens& operator=(server_tokens&&) = default;
|
||||
|
||||
// Allow accessing elements using [] operator
|
||||
llama_token operator[](size_t index) { return tokens[index]; }
|
||||
const llama_token& operator[](size_t index) const { return tokens[index]; }
|
||||
|
||||
server_tokens(mtmd::input_chunks & mtmd_chunks, bool has_mtmd);
|
||||
server_tokens(const llama_tokens & tokens, bool has_mtmd);
|
||||
|
||||
// for debugging
|
||||
std::string str() const;
|
||||
|
||||
llama_pos pos_next() const;
|
||||
const mtmd::input_chunk_ptr & find_chunk(size_t idx) const;
|
||||
|
||||
void push_back(llama_token tok);
|
||||
|
||||
// will create a copy of the chunk if it contains non-text data
|
||||
void push_back(const mtmd_input_chunk * chunk);
|
||||
|
||||
// appends server tokens, updates the media map. copies media chunks.
|
||||
void push_back(server_tokens & tokens);
|
||||
|
||||
// for compatibility with context shift and prompt truncation
|
||||
void insert(const llama_tokens & inp_tokens);
|
||||
|
||||
// for compatibility with speculative decoding, ctx shift, slot save/load
|
||||
const llama_tokens & get_text_tokens() const;
|
||||
|
||||
// for compatibility with speculative decoding
|
||||
void set_token(llama_pos pos, llama_token id);
|
||||
|
||||
size_t size() const { return tokens.size(); }
|
||||
|
||||
bool empty() const { return tokens.empty(); }
|
||||
|
||||
void clear() {
|
||||
map_idx_to_media.clear();
|
||||
tokens.clear();
|
||||
}
|
||||
|
||||
void keep_first(size_t n);
|
||||
|
||||
std::string detokenize(const llama_context * ctx, bool special) const;
|
||||
|
||||
size_t get_common_prefix(const server_tokens & b) const;
|
||||
|
||||
// make sure all text tokens are within the vocab range
|
||||
bool validate(const struct llama_context * ctx) const;
|
||||
|
||||
// encode and decode the image chunk
|
||||
int32_t process_chunk(
|
||||
llama_context * ctx,
|
||||
mtmd_context * mctx,
|
||||
size_t idx,
|
||||
llama_pos pos,
|
||||
int32_t seq_id,
|
||||
size_t & n_tokens_out) const;
|
||||
|
||||
server_tokens clone() const;
|
||||
};
|
||||
|
||||
|
||||
//
|
||||
// tokenizer and input processing utils
|
||||
//
|
||||
|
||||
bool json_is_array_of_numbers(const json & data);
|
||||
|
||||
// is array having BOTH numbers & strings?
|
||||
bool json_is_array_of_mixed_numbers_strings(const json & data);
|
||||
|
||||
// does array have any individual integers/tokens?
|
||||
bool json_is_array_and_contains_numbers(const json & data);
|
||||
|
||||
// get value by path(key1 / key2)
|
||||
json json_get_nested_values(const std::vector<std::string> & paths, const json & js);
|
||||
|
||||
/**
|
||||
* this handles 2 cases:
|
||||
* - only string, example: "string"
|
||||
* - mixed string and tokens, example: [12, 34, "string", 56, 78]
|
||||
*/
|
||||
llama_tokens tokenize_mixed(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special);
|
||||
|
||||
// return the last index of character that can form a valid string
|
||||
// if the last character is potentially cut in half, return the index before the cut
|
||||
// if validate_utf8(text) == text.size(), then the whole text is valid utf8
|
||||
size_t validate_utf8(const std::string& text);
|
||||
|
||||
// process mtmd prompt, return the server_tokens containing both text tokens and media chunks
|
||||
server_tokens process_mtmd_prompt(mtmd_context * mctx, std::string prompt, std::vector<raw_buffer> files);
|
||||
|
||||
/**
|
||||
* break the input "prompt" object into multiple prompt if needed, then tokenize them
|
||||
* this supports these cases:
|
||||
* - "prompt": "string"
|
||||
* - "prompt": [12, 34, 56]
|
||||
* - "prompt": [12, 34, "string", 56, 78]
|
||||
* - "prompt": { "prompt_string": "string", "multimodal_data": [ "base64" ] }
|
||||
* and multiple prompts (multi-tasks):
|
||||
* - "prompt": ["string1", "string2"]
|
||||
* - "prompt": ["string1", [12, 34, 56]]
|
||||
* - "prompt": [[12, 34, 56], [78, 90, 12]]
|
||||
* - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56], { "prompt_string": "string", "multimodal_data": [ "base64" ]}]
|
||||
*/
|
||||
std::vector<server_tokens> tokenize_input_prompts(
|
||||
const llama_vocab * vocab,
|
||||
mtmd_context * mctx,
|
||||
const json & json_prompt,
|
||||
bool add_special,
|
||||
bool parse_special);
|
||||
|
||||
//
|
||||
// OAI utils
|
||||
//
|
||||
|
||||
// used by /completions endpoint
|
||||
json oaicompat_completion_params_parse(const json & body);
|
||||
|
||||
struct oaicompat_parser_options {
|
||||
bool use_jinja;
|
||||
bool prefill_assistant;
|
||||
common_reasoning_format reasoning_format;
|
||||
std::map<std::string,std::string> chat_template_kwargs;
|
||||
common_chat_templates * tmpls;
|
||||
bool allow_image;
|
||||
bool allow_audio;
|
||||
bool enable_thinking = true;
|
||||
std::string media_path;
|
||||
};
|
||||
|
||||
// used by /chat/completions endpoint
|
||||
json oaicompat_chat_params_parse(
|
||||
json & body, /* openai api json semantics */
|
||||
const oaicompat_parser_options & opt,
|
||||
std::vector<raw_buffer> & out_files);
|
||||
|
||||
// convert Anthropic Messages API format to OpenAI Chat Completions API format
|
||||
json convert_anthropic_to_oai(const json & body);
|
||||
|
||||
// TODO: move it to server-task.cpp
|
||||
json format_embeddings_response_oaicompat(
|
||||
const json & request,
|
||||
const std::string & model_name,
|
||||
const json & embeddings,
|
||||
bool use_base64 = false);
|
||||
|
||||
// TODO: move it to server-task.cpp
|
||||
json format_response_rerank(
|
||||
const json & request,
|
||||
const std::string & model_name,
|
||||
const json & ranks,
|
||||
bool is_tei_format,
|
||||
std::vector<std::string> & texts,
|
||||
int top_n);
|
||||
|
||||
//
|
||||
// other utils
|
||||
//
|
||||
|
||||
std::vector<llama_token_data> get_token_probabilities(llama_context * ctx, int idx);
|
||||
|
||||
std::string safe_json_to_str(const json & data);
|
||||
|
||||
std::string tokens_to_str(llama_context * ctx, const llama_tokens & tokens);
|
||||
std::string tokens_to_str(const llama_vocab * vocab, const llama_tokens & tokens);
|
||||
|
||||
// format incomplete utf-8 multibyte character for output
|
||||
std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token);
|
||||
|
||||
// format server-sent event (SSE), return the formatted string to send
|
||||
// note: if data is a json array, it will be sent as multiple events, one per item
|
||||
std::string format_oai_sse(const json & data);
|
||||
|
||||
// format Anthropic-style SSE with event types
|
||||
std::string format_anthropic_sse(const json & data);
|
||||
|
||||
bool is_valid_utf8(const std::string & str);
|
||||
|
||||
//
|
||||
// formatting output responses
|
||||
// TODO: move these to server-task.cpp
|
||||
//
|
||||
|
||||
llama_tokens format_prompt_infill(
|
||||
const llama_vocab * vocab,
|
||||
const json & input_prefix,
|
||||
const json & input_suffix,
|
||||
const json & input_extra,
|
||||
const int n_batch,
|
||||
const int n_predict,
|
||||
const int n_ctx,
|
||||
const bool spm_infill,
|
||||
const llama_tokens & tokens_prompt);
|
||||
|
||||
// format rerank task: [BOS]query[EOS][SEP]doc[EOS].
|
||||
server_tokens format_prompt_rerank(
|
||||
const struct llama_model * model,
|
||||
const struct llama_vocab * vocab,
|
||||
mtmd_context * mctx,
|
||||
const std::string & query,
|
||||
const std::string & doc);
|
||||
4088
tools/server/server-context.cpp
Normal file
130
tools/server/server-context.h
Normal file
@@ -0,0 +1,130 @@
|
||||
#include "server-http.h"
|
||||
#include "server-task.h"
|
||||
#include "server-queue.h"
|
||||
|
||||
#include <nlohmann/json_fwd.hpp>
|
||||
|
||||
#include <cstddef>
|
||||
#include <memory>
|
||||
|
||||
struct server_context_impl; // private implementation
|
||||
|
||||
struct server_context_meta {
|
||||
std::string build_info;
|
||||
std::string model_name;
|
||||
std::string model_path;
|
||||
bool has_mtmd;
|
||||
bool has_inp_image;
|
||||
bool has_inp_audio;
|
||||
json json_webui_settings;
|
||||
int slot_n_ctx;
|
||||
enum llama_pooling_type pooling_type;
|
||||
|
||||
// chat template
|
||||
std::string chat_template;
|
||||
std::string chat_template_tool_use;
|
||||
|
||||
// tokens
|
||||
std::string bos_token_str;
|
||||
std::string eos_token_str;
|
||||
llama_token fim_pre_token;
|
||||
llama_token fim_sub_token;
|
||||
llama_token fim_mid_token;
|
||||
|
||||
// model meta
|
||||
enum llama_vocab_type model_vocab_type;
|
||||
int32_t model_vocab_n_tokens;
|
||||
int32_t model_n_ctx_train;
|
||||
int32_t model_n_embd_inp;
|
||||
uint64_t model_n_params;
|
||||
uint64_t model_size;
|
||||
};
|
||||
|
||||
struct server_context {
|
||||
std::unique_ptr<server_context_impl> impl;
|
||||
|
||||
server_context();
|
||||
~server_context();
|
||||
|
||||
// load the model and initialize llama_context
|
||||
// returns true on success
|
||||
bool load_model(const common_params & params);
|
||||
|
||||
// this function will block main thread until termination
|
||||
void start_loop();
|
||||
|
||||
// terminate main loop (will unblock start_loop)
|
||||
void terminate();
|
||||
|
||||
// get the underlaying llama_context, can return nullptr if sleeping
|
||||
// not thread-safe, should only be used from the main thread
|
||||
llama_context * get_llama_context() const;
|
||||
|
||||
// get a new response reader, used by CLI application
|
||||
server_response_reader get_response_reader();
|
||||
|
||||
// get server metadata (read-only), can only be called after load_model()
|
||||
// not thread-safe, should only be used from the main thread
|
||||
server_context_meta get_meta() const;
|
||||
};
|
||||
|
||||
|
||||
// forward declarations
|
||||
struct server_res_generator;
|
||||
|
||||
struct server_routes {
|
||||
server_routes(const common_params & params, server_context & ctx_server);
|
||||
|
||||
void init_routes();
|
||||
|
||||
// note: this is not thread-safe and can only when ctx_http.is_ready is false
|
||||
void update_meta(const server_context & ctx_server) {
|
||||
this->meta = std::make_unique<server_context_meta>(ctx_server.get_meta());
|
||||
}
|
||||
|
||||
// handlers using lambda function, so that they can capture `this` without `std::bind`
|
||||
// they won't be called until ctx_http.is_ready is set to true
|
||||
server_http_context::handler_t get_health;
|
||||
server_http_context::handler_t get_metrics;
|
||||
server_http_context::handler_t get_slots;
|
||||
server_http_context::handler_t post_slots;
|
||||
server_http_context::handler_t get_props;
|
||||
server_http_context::handler_t post_props;
|
||||
server_http_context::handler_t get_api_show;
|
||||
server_http_context::handler_t post_infill;
|
||||
server_http_context::handler_t post_completions;
|
||||
server_http_context::handler_t post_completions_oai;
|
||||
server_http_context::handler_t post_chat_completions;
|
||||
server_http_context::handler_t post_anthropic_messages;
|
||||
server_http_context::handler_t post_anthropic_count_tokens;
|
||||
server_http_context::handler_t post_apply_template;
|
||||
server_http_context::handler_t get_models;
|
||||
server_http_context::handler_t post_tokenize;
|
||||
server_http_context::handler_t post_detokenize;
|
||||
server_http_context::handler_t post_embeddings;
|
||||
server_http_context::handler_t post_embeddings_oai;
|
||||
server_http_context::handler_t post_rerank;
|
||||
server_http_context::handler_t get_lora_adapters;
|
||||
server_http_context::handler_t post_lora_adapters;
|
||||
private:
|
||||
std::unique_ptr<server_res_generator> handle_completions_impl(
|
||||
const server_http_req & req,
|
||||
server_task_type type,
|
||||
const json & data,
|
||||
const std::vector<raw_buffer> & files,
|
||||
task_response_type res_type);
|
||||
std::unique_ptr<server_res_generator> handle_slots_save(const server_http_req & req, int id_slot);
|
||||
std::unique_ptr<server_res_generator> handle_slots_restore(const server_http_req & req, int id_slot);
|
||||
std::unique_ptr<server_res_generator> handle_slots_erase(const server_http_req &, int id_slot);
|
||||
std::unique_ptr<server_res_generator> handle_embeddings_impl(const server_http_req & req, task_response_type res_type);
|
||||
|
||||
// using unique_ptr to allow late initialization of const
|
||||
std::unique_ptr<const server_context_meta> meta;
|
||||
|
||||
const common_params & params;
|
||||
const server_context_impl & ctx_server;
|
||||
|
||||
server_queue & queue_tasks;
|
||||
server_response & queue_results;
|
||||
std::unique_ptr<server_res_generator> create_response(bool bypass_sleep = false);
|
||||
};
|
||||
400
tools/server/server-http.cpp
Normal file
@@ -0,0 +1,400 @@
|
||||
#include "common.h"
|
||||
#include "server-http.h"
|
||||
#include "server-common.h"
|
||||
|
||||
#include <cpp-httplib/httplib.h>
|
||||
|
||||
#include <functional>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
|
||||
// auto generated files (see README.md for details)
|
||||
#include "index.html.gz.hpp"
|
||||
#include "loading.html.hpp"
|
||||
|
||||
//
|
||||
// HTTP implementation using cpp-httplib
|
||||
//
|
||||
|
||||
class server_http_context::Impl {
|
||||
public:
|
||||
std::unique_ptr<httplib::Server> srv;
|
||||
};
|
||||
|
||||
server_http_context::server_http_context()
|
||||
: pimpl(std::make_unique<server_http_context::Impl>())
|
||||
{}
|
||||
|
||||
server_http_context::~server_http_context() = default;
|
||||
|
||||
static void log_server_request(const httplib::Request & req, const httplib::Response & res) {
|
||||
// skip GH copilot requests when using default port
|
||||
if (req.path == "/v1/health") {
|
||||
return;
|
||||
}
|
||||
|
||||
// reminder: this function is not covered by httplib's exception handler; if someone does more complicated stuff, think about wrapping it in try-catch
|
||||
|
||||
SRV_INF("request: %s %s %s %d\n", req.method.c_str(), req.path.c_str(), req.remote_addr.c_str(), res.status);
|
||||
|
||||
SRV_DBG("request: %s\n", req.body.c_str());
|
||||
SRV_DBG("response: %s\n", res.body.c_str());
|
||||
}
|
||||
|
||||
bool server_http_context::init(const common_params & params) {
|
||||
path_prefix = params.api_prefix;
|
||||
port = params.port;
|
||||
hostname = params.hostname;
|
||||
|
||||
auto & srv = pimpl->srv;
|
||||
|
||||
#ifdef CPPHTTPLIB_OPENSSL_SUPPORT
|
||||
if (params.ssl_file_key != "" && params.ssl_file_cert != "") {
|
||||
LOG_INF("Running with SSL: key = %s, cert = %s\n", params.ssl_file_key.c_str(), params.ssl_file_cert.c_str());
|
||||
srv.reset(
|
||||
new httplib::SSLServer(params.ssl_file_cert.c_str(), params.ssl_file_key.c_str())
|
||||
);
|
||||
} else {
|
||||
LOG_INF("Running without SSL\n");
|
||||
srv.reset(new httplib::Server());
|
||||
}
|
||||
#else
|
||||
if (params.ssl_file_key != "" && params.ssl_file_cert != "") {
|
||||
LOG_ERR("Server is built without SSL support\n");
|
||||
return false;
|
||||
}
|
||||
srv.reset(new httplib::Server());
|
||||
#endif
|
||||
|
||||
srv->set_default_headers({{"Server", "llama.cpp"}});
|
||||
srv->set_logger(log_server_request);
|
||||
srv->set_exception_handler([](const httplib::Request &, httplib::Response & res, const std::exception_ptr & ep) {
|
||||
// this is fail-safe; exceptions should already handled by `ex_wrapper`
|
||||
|
||||
std::string message;
|
||||
try {
|
||||
std::rethrow_exception(ep);
|
||||
} catch (const std::exception & e) {
|
||||
message = e.what();
|
||||
} catch (...) {
|
||||
message = "Unknown Exception";
|
||||
}
|
||||
|
||||
res.status = 500;
|
||||
res.set_content(message, "text/plain");
|
||||
LOG_ERR("got exception: %s\n", message.c_str());
|
||||
});
|
||||
|
||||
srv->set_error_handler([](const httplib::Request &, httplib::Response & res) {
|
||||
if (res.status == 404) {
|
||||
res.set_content(
|
||||
safe_json_to_str(json {
|
||||
{"error", {
|
||||
{"message", "File Not Found"},
|
||||
{"type", "not_found_error"},
|
||||
{"code", 404}
|
||||
}}
|
||||
}),
|
||||
"application/json; charset=utf-8"
|
||||
);
|
||||
}
|
||||
// for other error codes, we skip processing here because it's already done by res->error()
|
||||
});
|
||||
|
||||
// set timeouts and change hostname and port
|
||||
srv->set_read_timeout (params.timeout_read);
|
||||
srv->set_write_timeout(params.timeout_write);
|
||||
|
||||
if (params.api_keys.size() == 1) {
|
||||
auto key = params.api_keys[0];
|
||||
std::string substr = key.substr(std::max((int)(key.length() - 4), 0));
|
||||
LOG_INF("%s: api_keys: ****%s\n", __func__, substr.c_str());
|
||||
} else if (params.api_keys.size() > 1) {
|
||||
LOG_INF("%s: api_keys: %zu keys loaded\n", __func__, params.api_keys.size());
|
||||
}
|
||||
|
||||
//
|
||||
// Middlewares
|
||||
//
|
||||
|
||||
auto middleware_validate_api_key = [api_keys = params.api_keys](const httplib::Request & req, httplib::Response & res) {
|
||||
static const std::unordered_set<std::string> public_endpoints = {
|
||||
"/health",
|
||||
"/v1/health",
|
||||
"/models",
|
||||
"/v1/models",
|
||||
"/api/tags"
|
||||
};
|
||||
|
||||
// If API key is not set, skip validation
|
||||
if (api_keys.empty()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// If path is public or is static file, skip validation
|
||||
if (public_endpoints.find(req.path) != public_endpoints.end() || req.path == "/") {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for API key in the Authorization header
|
||||
std::string req_api_key = req.get_header_value("Authorization");
|
||||
if (req_api_key.empty()) {
|
||||
// retry with anthropic header
|
||||
req_api_key = req.get_header_value("X-Api-Key");
|
||||
}
|
||||
|
||||
// remove the "Bearer " prefix if needed
|
||||
std::string prefix = "Bearer ";
|
||||
if (req_api_key.substr(0, prefix.size()) == prefix) {
|
||||
req_api_key = req_api_key.substr(prefix.size());
|
||||
}
|
||||
|
||||
// validate the API key
|
||||
if (std::find(api_keys.begin(), api_keys.end(), req_api_key) != api_keys.end()) {
|
||||
return true; // API key is valid
|
||||
}
|
||||
|
||||
// API key is invalid or not provided
|
||||
res.status = 401;
|
||||
res.set_content(
|
||||
safe_json_to_str(json {
|
||||
{"error", {
|
||||
{"message", "Invalid API Key"},
|
||||
{"type", "authentication_error"},
|
||||
{"code", 401}
|
||||
}}
|
||||
}),
|
||||
"application/json; charset=utf-8"
|
||||
);
|
||||
|
||||
LOG_WRN("Unauthorized: Invalid API Key\n");
|
||||
|
||||
return false;
|
||||
};
|
||||
|
||||
auto middleware_server_state = [this](const httplib::Request & req, httplib::Response & res) {
|
||||
bool ready = is_ready.load();
|
||||
if (!ready) {
|
||||
auto tmp = string_split<std::string>(req.path, '.');
|
||||
if (req.path == "/" || tmp.back() == "html") {
|
||||
res.status = 503;
|
||||
res.set_content(reinterpret_cast<const char*>(loading_html), loading_html_len, "text/html; charset=utf-8");
|
||||
} else {
|
||||
// no endpoints is allowed to be accessed when the server is not ready
|
||||
// this is to prevent any data races or inconsistent states
|
||||
res.status = 503;
|
||||
res.set_content(
|
||||
safe_json_to_str(json {
|
||||
{"error", {
|
||||
{"message", "Loading model"},
|
||||
{"type", "unavailable_error"},
|
||||
{"code", 503}
|
||||
}}
|
||||
}),
|
||||
"application/json; charset=utf-8"
|
||||
);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
};
|
||||
|
||||
// register server middlewares
|
||||
srv->set_pre_routing_handler([middleware_validate_api_key, middleware_server_state](const httplib::Request & req, httplib::Response & res) {
|
||||
res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
|
||||
// If this is OPTIONS request, skip validation because browsers don't include Authorization header
|
||||
if (req.method == "OPTIONS") {
|
||||
res.set_header("Access-Control-Allow-Credentials", "true");
|
||||
res.set_header("Access-Control-Allow-Methods", "GET, POST");
|
||||
res.set_header("Access-Control-Allow-Headers", "*");
|
||||
res.set_content("", "text/html"); // blank response, no data
|
||||
return httplib::Server::HandlerResponse::Handled; // skip further processing
|
||||
}
|
||||
if (!middleware_server_state(req, res)) {
|
||||
return httplib::Server::HandlerResponse::Handled;
|
||||
}
|
||||
if (!middleware_validate_api_key(req, res)) {
|
||||
return httplib::Server::HandlerResponse::Handled;
|
||||
}
|
||||
return httplib::Server::HandlerResponse::Unhandled;
|
||||
});
|
||||
|
||||
int n_threads_http = params.n_threads_http;
|
||||
if (n_threads_http < 1) {
|
||||
// +2 threads for monitoring endpoints
|
||||
n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
|
||||
}
|
||||
LOG_INF("%s: using %d threads for HTTP server\n", __func__, n_threads_http);
|
||||
srv->new_task_queue = [n_threads_http] { return new httplib::ThreadPool(n_threads_http); };
|
||||
|
||||
//
|
||||
// Web UI setup
|
||||
//
|
||||
|
||||
if (!params.webui) {
|
||||
LOG_INF("Web UI is disabled\n");
|
||||
} else {
|
||||
// register static assets routes
|
||||
if (!params.public_path.empty()) {
|
||||
// Set the base directory for serving static files
|
||||
bool is_found = srv->set_mount_point(params.api_prefix + "/", params.public_path);
|
||||
if (!is_found) {
|
||||
LOG_ERR("%s: static assets path not found: %s\n", __func__, params.public_path.c_str());
|
||||
return 1;
|
||||
}
|
||||
} else {
|
||||
// using embedded static index.html
|
||||
srv->Get(params.api_prefix + "/", [](const httplib::Request & req, httplib::Response & res) {
|
||||
if (req.get_header_value("Accept-Encoding").find("gzip") == std::string::npos) {
|
||||
res.set_content("Error: gzip is not supported by this browser", "text/plain");
|
||||
} else {
|
||||
res.set_header("Content-Encoding", "gzip");
|
||||
// COEP and COOP headers, required by pyodide (python interpreter)
|
||||
res.set_header("Cross-Origin-Embedder-Policy", "require-corp");
|
||||
res.set_header("Cross-Origin-Opener-Policy", "same-origin");
|
||||
res.set_content(reinterpret_cast<const char*>(index_html_gz), index_html_gz_len, "text/html; charset=utf-8");
|
||||
}
|
||||
return false;
|
||||
});
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool server_http_context::start() {
|
||||
// Bind and listen
|
||||
|
||||
auto & srv = pimpl->srv;
|
||||
bool was_bound = false;
|
||||
bool is_sock = false;
|
||||
if (string_ends_with(std::string(hostname), ".sock")) {
|
||||
is_sock = true;
|
||||
LOG_INF("%s: setting address family to AF_UNIX\n", __func__);
|
||||
srv->set_address_family(AF_UNIX);
|
||||
// bind_to_port requires a second arg, any value other than 0 should
|
||||
// simply get ignored
|
||||
was_bound = srv->bind_to_port(hostname, 8080);
|
||||
} else {
|
||||
LOG_INF("%s: binding port with default address family\n", __func__);
|
||||
// bind HTTP listen port
|
||||
if (port == 0) {
|
||||
int bound_port = srv->bind_to_any_port(hostname);
|
||||
was_bound = (bound_port >= 0);
|
||||
if (was_bound) {
|
||||
port = bound_port;
|
||||
}
|
||||
} else {
|
||||
was_bound = srv->bind_to_port(hostname, port);
|
||||
}
|
||||
}
|
||||
|
||||
if (!was_bound) {
|
||||
LOG_ERR("%s: couldn't bind HTTP server socket, hostname: %s, port: %d\n", __func__, hostname.c_str(), port);
|
||||
return false;
|
||||
}
|
||||
|
||||
// run the HTTP server in a thread
|
||||
thread = std::thread([this]() { pimpl->srv->listen_after_bind(); });
|
||||
srv->wait_until_ready();
|
||||
|
||||
listening_address = is_sock ? string_format("unix://%s", hostname.c_str())
|
||||
: string_format("http://%s:%d", hostname.c_str(), port);
|
||||
return true;
|
||||
}
|
||||
|
||||
void server_http_context::stop() const {
|
||||
if (pimpl->srv) {
|
||||
pimpl->srv->stop();
|
||||
}
|
||||
}
|
||||
|
||||
static void set_headers(httplib::Response & res, const std::map<std::string, std::string> & headers) {
|
||||
for (const auto & [key, value] : headers) {
|
||||
res.set_header(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
static std::map<std::string, std::string> get_params(const httplib::Request & req) {
|
||||
std::map<std::string, std::string> params;
|
||||
for (const auto & [key, value] : req.params) {
|
||||
params[key] = value;
|
||||
}
|
||||
for (const auto & [key, value] : req.path_params) {
|
||||
params[key] = value;
|
||||
}
|
||||
return params;
|
||||
}
|
||||
|
||||
static std::map<std::string, std::string> get_headers(const httplib::Request & req) {
|
||||
std::map<std::string, std::string> headers;
|
||||
for (const auto & [key, value] : req.headers) {
|
||||
headers[key] = value;
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
|
||||
// using unique_ptr for request to allow safe capturing in lambdas
|
||||
using server_http_req_ptr = std::unique_ptr<server_http_req>;
|
||||
|
||||
static void process_handler_response(server_http_req_ptr && request, server_http_res_ptr & response, httplib::Response & res) {
|
||||
if (response->is_stream()) {
|
||||
res.status = response->status;
|
||||
set_headers(res, response->headers);
|
||||
std::string content_type = response->content_type;
|
||||
// convert to shared_ptr as both chunked_content_provider() and on_complete() need to use it
|
||||
std::shared_ptr<server_http_req> q_ptr = std::move(request);
|
||||
std::shared_ptr<server_http_res> r_ptr = std::move(response);
|
||||
const auto chunked_content_provider = [response = r_ptr](size_t, httplib::DataSink & sink) -> bool {
|
||||
std::string chunk;
|
||||
bool has_next = response->next(chunk);
|
||||
if (!chunk.empty()) {
|
||||
// TODO: maybe handle sink.write unsuccessful? for now, we rely on is_connection_closed()
|
||||
sink.write(chunk.data(), chunk.size());
|
||||
SRV_DBG("http: streamed chunk: %s\n", chunk.c_str());
|
||||
}
|
||||
if (!has_next) {
|
||||
sink.done();
|
||||
SRV_DBG("%s", "http: stream ended\n");
|
||||
}
|
||||
return has_next;
|
||||
};
|
||||
const auto on_complete = [request = q_ptr, response = r_ptr](bool) mutable {
|
||||
response.reset(); // trigger the destruction of the response object
|
||||
request.reset(); // trigger the destruction of the request object
|
||||
};
|
||||
res.set_chunked_content_provider(content_type, chunked_content_provider, on_complete);
|
||||
} else {
|
||||
res.status = response->status;
|
||||
set_headers(res, response->headers);
|
||||
res.set_content(response->data, response->content_type);
|
||||
}
|
||||
}
|
||||
|
||||
void server_http_context::get(const std::string & path, const server_http_context::handler_t & handler) const {
|
||||
pimpl->srv->Get(path_prefix + path, [handler](const httplib::Request & req, httplib::Response & res) {
|
||||
server_http_req_ptr request = std::make_unique<server_http_req>(server_http_req{
|
||||
get_params(req),
|
||||
get_headers(req),
|
||||
req.path,
|
||||
req.body,
|
||||
req.is_connection_closed
|
||||
});
|
||||
server_http_res_ptr response = handler(*request);
|
||||
process_handler_response(std::move(request), response, res);
|
||||
});
|
||||
}
|
||||
|
||||
void server_http_context::post(const std::string & path, const server_http_context::handler_t & handler) const {
|
||||
pimpl->srv->Post(path_prefix + path, [handler](const httplib::Request & req, httplib::Response & res) {
|
||||
server_http_req_ptr request = std::make_unique<server_http_req>(server_http_req{
|
||||
get_params(req),
|
||||
get_headers(req),
|
||||
req.path,
|
||||
req.body,
|
||||
req.is_connection_closed
|
||||
});
|
||||
server_http_res_ptr response = handler(*request);
|
||||
process_handler_response(std::move(request), response, res);
|
||||
});
|
||||
}
|
||||
|
||||
78
tools/server/server-http.h
Normal file
@@ -0,0 +1,78 @@
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <functional>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
|
||||
struct common_params;
|
||||
|
||||
// generator-like API for HTTP response generation
|
||||
// this object response with one of the 2 modes:
|
||||
// 1) normal response: `data` contains the full response body
|
||||
// 2) streaming response: each call to next(output) generates the next chunk
|
||||
// when next(output) returns false, no more data after the current chunk
|
||||
// note: some chunks can be empty, in which case no data is sent for that chunk
|
||||
struct server_http_res {
|
||||
std::string content_type = "application/json; charset=utf-8";
|
||||
int status = 200;
|
||||
std::string data;
|
||||
std::map<std::string, std::string> headers;
|
||||
|
||||
// TODO: move this to a virtual function once we have proper polymorphism support
|
||||
std::function<bool(std::string &)> next = nullptr;
|
||||
bool is_stream() const {
|
||||
return next != nullptr;
|
||||
}
|
||||
|
||||
virtual ~server_http_res() = default;
|
||||
};
|
||||
|
||||
// unique pointer, used by set_chunked_content_provider
|
||||
// httplib requires the stream provider to be stored in heap
|
||||
using server_http_res_ptr = std::unique_ptr<server_http_res>;
|
||||
|
||||
struct server_http_req {
|
||||
std::map<std::string, std::string> params; // path_params + query_params
|
||||
std::map<std::string, std::string> headers; // reserved for future use
|
||||
std::string path; // reserved for future use
|
||||
std::string body;
|
||||
const std::function<bool()> & should_stop;
|
||||
|
||||
std::string get_param(const std::string & key, const std::string & def = "") const {
|
||||
auto it = params.find(key);
|
||||
if (it != params.end()) {
|
||||
return it->second;
|
||||
}
|
||||
return def;
|
||||
}
|
||||
};
|
||||
|
||||
struct server_http_context {
|
||||
class Impl;
|
||||
std::unique_ptr<Impl> pimpl;
|
||||
|
||||
std::thread thread; // server thread
|
||||
std::atomic<bool> is_ready = false;
|
||||
|
||||
std::string path_prefix;
|
||||
std::string hostname;
|
||||
int port;
|
||||
|
||||
server_http_context();
|
||||
~server_http_context();
|
||||
|
||||
bool init(const common_params & params);
|
||||
bool start();
|
||||
void stop() const;
|
||||
|
||||
// note: the handler should never throw exceptions
|
||||
using handler_t = std::function<server_http_res_ptr(const server_http_req & req)>;
|
||||
|
||||
void get(const std::string & path, const handler_t & handler) const;
|
||||
void post(const std::string & path, const handler_t & handler) const;
|
||||
|
||||
// for debugging
|
||||
std::string listening_address;
|
||||
};
|
||||
1092
tools/server/server-models.cpp
Normal file
203
tools/server/server-models.h
Normal file
@@ -0,0 +1,203 @@
|
||||
#pragma once
|
||||
|
||||
#include "common.h"
|
||||
#include "preset.h"
|
||||
#include "server-common.h"
|
||||
#include "server-http.h"
|
||||
|
||||
#include <mutex>
|
||||
#include <condition_variable>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <set>
|
||||
|
||||
/**
|
||||
* state diagram:
|
||||
*
|
||||
* UNLOADED ──► LOADING ──► LOADED
|
||||
* ▲ │ │
|
||||
* └───failed───┘ │
|
||||
* ▲ │
|
||||
* └────────unloaded─────────┘
|
||||
*/
|
||||
enum server_model_status {
|
||||
// TODO: also add downloading state when the logic is added
|
||||
SERVER_MODEL_STATUS_UNLOADED,
|
||||
SERVER_MODEL_STATUS_LOADING,
|
||||
SERVER_MODEL_STATUS_LOADED
|
||||
};
|
||||
|
||||
static server_model_status server_model_status_from_string(const std::string & status_str) {
|
||||
if (status_str == "unloaded") {
|
||||
return SERVER_MODEL_STATUS_UNLOADED;
|
||||
}
|
||||
if (status_str == "loading") {
|
||||
return SERVER_MODEL_STATUS_LOADING;
|
||||
}
|
||||
if (status_str == "loaded") {
|
||||
return SERVER_MODEL_STATUS_LOADED;
|
||||
}
|
||||
throw std::runtime_error("invalid server model status");
|
||||
}
|
||||
|
||||
static std::string server_model_status_to_string(server_model_status status) {
|
||||
switch (status) {
|
||||
case SERVER_MODEL_STATUS_UNLOADED: return "unloaded";
|
||||
case SERVER_MODEL_STATUS_LOADING: return "loading";
|
||||
case SERVER_MODEL_STATUS_LOADED: return "loaded";
|
||||
default: return "unknown";
|
||||
}
|
||||
}
|
||||
|
||||
struct server_model_meta {
|
||||
common_preset preset;
|
||||
std::string name;
|
||||
int port = 0;
|
||||
server_model_status status = SERVER_MODEL_STATUS_UNLOADED;
|
||||
int64_t last_used = 0; // for LRU unloading
|
||||
std::vector<std::string> args; // args passed to the model instance, will be populated by render_args()
|
||||
int exit_code = 0; // exit code of the model instance process (only valid if status == FAILED)
|
||||
int stop_timeout = 0; // seconds to wait before force-killing the model instance during shutdown
|
||||
|
||||
bool is_active() const {
|
||||
return status == SERVER_MODEL_STATUS_LOADED || status == SERVER_MODEL_STATUS_LOADING;
|
||||
}
|
||||
|
||||
bool is_failed() const {
|
||||
return status == SERVER_MODEL_STATUS_UNLOADED && exit_code != 0;
|
||||
}
|
||||
|
||||
void update_args(common_preset_context & ctx_presets, std::string bin_path);
|
||||
};
|
||||
|
||||
struct subprocess_s;
|
||||
|
||||
struct server_models {
|
||||
private:
|
||||
struct instance_t {
|
||||
std::shared_ptr<subprocess_s> subproc; // shared between main thread and monitoring thread
|
||||
std::thread th;
|
||||
server_model_meta meta;
|
||||
FILE * stdin_file = nullptr;
|
||||
};
|
||||
|
||||
std::mutex mutex;
|
||||
std::condition_variable cv;
|
||||
std::map<std::string, instance_t> mapping;
|
||||
|
||||
// for stopping models
|
||||
std::condition_variable cv_stop;
|
||||
std::set<std::string> stopping_models;
|
||||
|
||||
common_preset_context ctx_preset;
|
||||
|
||||
common_params base_params;
|
||||
std::string bin_path;
|
||||
std::vector<std::string> base_env;
|
||||
common_preset base_preset; // base preset from llama-server CLI args
|
||||
|
||||
void update_meta(const std::string & name, const server_model_meta & meta);
|
||||
|
||||
// unload least recently used models if the limit is reached
|
||||
void unload_lru();
|
||||
|
||||
// not thread-safe, caller must hold mutex
|
||||
void add_model(server_model_meta && meta);
|
||||
|
||||
public:
|
||||
server_models(const common_params & params, int argc, char ** argv);
|
||||
|
||||
void load_models();
|
||||
|
||||
// check if a model instance exists (thread-safe)
|
||||
bool has_model(const std::string & name);
|
||||
|
||||
// return a copy of model metadata (thread-safe)
|
||||
std::optional<server_model_meta> get_meta(const std::string & name);
|
||||
|
||||
// return a copy of all model metadata (thread-safe)
|
||||
std::vector<server_model_meta> get_all_meta();
|
||||
|
||||
// load and unload model instances
|
||||
// these functions are thread-safe
|
||||
void load(const std::string & name);
|
||||
void unload(const std::string & name);
|
||||
void unload_all();
|
||||
|
||||
// update the status of a model instance (thread-safe)
|
||||
void update_status(const std::string & name, server_model_status status, int exit_code);
|
||||
|
||||
// wait until the model instance is fully loaded (thread-safe)
|
||||
// return when the model is loaded or failed to load
|
||||
void wait_until_loaded(const std::string & name);
|
||||
|
||||
// load the model if not loaded, otherwise do nothing (thread-safe)
|
||||
// return false if model is already loaded; return true otherwise (meta may need to be refreshed)
|
||||
bool ensure_model_loaded(const std::string & name);
|
||||
|
||||
// proxy an HTTP request to the model instance
|
||||
server_http_res_ptr proxy_request(const server_http_req & req, const std::string & method, const std::string & name, bool update_last_used);
|
||||
|
||||
// notify the router server that a model instance is ready
|
||||
// return the monitoring thread (to be joined by the caller)
|
||||
static std::thread setup_child_server(const std::function<void(int)> & shutdown_handler);
|
||||
};
|
||||
|
||||
struct server_models_routes {
|
||||
common_params params;
|
||||
json webui_settings = json::object();
|
||||
server_models models;
|
||||
server_models_routes(const common_params & params, int argc, char ** argv)
|
||||
: params(params), models(params, argc, argv) {
|
||||
if (!this->params.webui_config_json.empty()) {
|
||||
try {
|
||||
webui_settings = json::parse(this->params.webui_config_json);
|
||||
} catch (const std::exception & e) {
|
||||
LOG_ERR("%s: failed to parse webui config: %s\n", __func__, e.what());
|
||||
throw;
|
||||
}
|
||||
}
|
||||
init_routes();
|
||||
}
|
||||
|
||||
void init_routes();
|
||||
// handlers using lambda function, so that they can capture `this` without `std::bind`
|
||||
server_http_context::handler_t get_router_props;
|
||||
server_http_context::handler_t proxy_get;
|
||||
server_http_context::handler_t proxy_post;
|
||||
server_http_context::handler_t get_router_models;
|
||||
server_http_context::handler_t post_router_models_load;
|
||||
server_http_context::handler_t post_router_models_unload;
|
||||
};
|
||||
|
||||
/**
|
||||
* A simple HTTP proxy that forwards requests to another server
|
||||
* and relays the responses back.
|
||||
*/
|
||||
struct server_http_proxy : server_http_res {
|
||||
std::function<void()> cleanup = nullptr;
|
||||
public:
|
||||
server_http_proxy(const std::string & method,
|
||||
const std::string & host,
|
||||
int port,
|
||||
const std::string & path,
|
||||
const std::map<std::string, std::string> & headers,
|
||||
const std::string & body,
|
||||
const std::function<bool()> should_stop,
|
||||
int32_t timeout_read,
|
||||
int32_t timeout_write
|
||||
);
|
||||
~server_http_proxy() {
|
||||
if (cleanup) {
|
||||
cleanup();
|
||||
}
|
||||
}
|
||||
private:
|
||||
std::thread thread;
|
||||
struct msg_t {
|
||||
std::map<std::string, std::string> headers;
|
||||
int status = 0;
|
||||
std::string data;
|
||||
std::string content_type;
|
||||
};
|
||||
};
|
||||
450
tools/server/server-queue.cpp
Normal file
@@ -0,0 +1,450 @@
|
||||
#include "server-task.h"
|
||||
#include "server-queue.h"
|
||||
|
||||
#include "log.h"
|
||||
|
||||
#include <chrono>
|
||||
|
||||
#define QUE_INF(fmt, ...) LOG_INF("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define QUE_WRN(fmt, ...) LOG_WRN("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
|
||||
#define RES_INF(fmt, ...) LOG_INF("res %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define RES_WRN(fmt, ...) LOG_WRN("res %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define RES_ERR(fmt, ...) LOG_ERR("res %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define RES_DBG(fmt, ...) LOG_DBG("res %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
|
||||
//
|
||||
// server_queue
|
||||
//
|
||||
|
||||
int server_queue::post(server_task && task, bool front) {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
GGML_ASSERT(task.id != -1);
|
||||
// if this is cancel task make sure to clean up pending tasks
|
||||
if (task.type == SERVER_TASK_TYPE_CANCEL) {
|
||||
cleanup_pending_task(task.id_target);
|
||||
}
|
||||
const int task_id = task.id;
|
||||
QUE_DBG("new task, id = %d, front = %d\n", task_id, front);
|
||||
if (front) {
|
||||
queue_tasks.push_front(std::move(task));
|
||||
} else {
|
||||
queue_tasks.push_back(std::move(task));
|
||||
}
|
||||
time_last_task = ggml_time_ms();
|
||||
condition_tasks.notify_one();
|
||||
return task_id;
|
||||
}
|
||||
|
||||
int server_queue::post(std::vector<server_task> && tasks, bool front) {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
for (auto & task : tasks) {
|
||||
if (task.id == -1) {
|
||||
task.id = id++;
|
||||
}
|
||||
// if this is cancel task make sure to clean up pending tasks
|
||||
if (task.type == SERVER_TASK_TYPE_CANCEL) {
|
||||
cleanup_pending_task(task.id_target);
|
||||
}
|
||||
QUE_DBG("new task, id = %d/%d, front = %d\n", task.id, (int) tasks.size(), front);
|
||||
if (front) {
|
||||
queue_tasks.push_front(std::move(task));
|
||||
} else {
|
||||
queue_tasks.push_back(std::move(task));
|
||||
}
|
||||
}
|
||||
time_last_task = ggml_time_ms();
|
||||
condition_tasks.notify_one();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void server_queue::defer(server_task && task) {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
QUE_DBG("defer task, id = %d\n", task.id);
|
||||
queue_tasks_deferred.push_back(std::move(task));
|
||||
time_last_task = ggml_time_ms();
|
||||
condition_tasks.notify_one();
|
||||
}
|
||||
|
||||
int server_queue::get_new_id() {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
int new_id = id++;
|
||||
return new_id;
|
||||
}
|
||||
|
||||
void server_queue::pop_deferred_task(int id_slot) {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
if (!queue_tasks_deferred.empty()) {
|
||||
// try to find a task that uses the specified slot
|
||||
bool found = false;
|
||||
for (auto it = queue_tasks_deferred.begin(); it != queue_tasks_deferred.end(); ++it) {
|
||||
if (it->id_slot == id_slot) {
|
||||
QUE_DBG("pop deferred task (use slot %d), id_task = %d\n", id_slot, it->id);
|
||||
queue_tasks.emplace_front(std::move(*it));
|
||||
queue_tasks_deferred.erase(it);
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// if not tasks found using the slot, just pop the first deferred task (default behavior)
|
||||
if (!found) {
|
||||
QUE_DBG("pop deferred task, id_task = %d\n", queue_tasks_deferred.front().id);
|
||||
queue_tasks.emplace_front(std::move(queue_tasks_deferred.front()));
|
||||
queue_tasks_deferred.pop_front();
|
||||
}
|
||||
}
|
||||
time_last_task = ggml_time_ms();
|
||||
condition_tasks.notify_one();
|
||||
}
|
||||
|
||||
void server_queue::wait_until_no_sleep() {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
if (!sleeping) {
|
||||
return;
|
||||
} else {
|
||||
if (!req_stop_sleeping) {
|
||||
QUE_DBG("%s", "requesting to stop sleeping\n");
|
||||
req_stop_sleeping = true;
|
||||
condition_tasks.notify_one(); // only main thread is waiting on this
|
||||
}
|
||||
QUE_DBG("%s", "waiting until no sleep\n");
|
||||
condition_tasks.wait(lock, [&]{
|
||||
return !sleeping;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
void server_queue::terminate() {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
running = false;
|
||||
condition_tasks.notify_all();
|
||||
}
|
||||
|
||||
void server_queue::start_loop(int64_t idle_sleep_ms) {
|
||||
running = true;
|
||||
time_last_task = ggml_time_ms();
|
||||
|
||||
constexpr auto max_wait_time = std::chrono::seconds(1);
|
||||
auto should_sleep = [&]() -> bool {
|
||||
// caller must hold mutex_tasks
|
||||
if (idle_sleep_ms < 0) {
|
||||
return false;
|
||||
}
|
||||
int64_t now = ggml_time_ms();
|
||||
return (now - time_last_task) >= idle_sleep_ms;
|
||||
};
|
||||
|
||||
while (true) {
|
||||
QUE_DBG("%s", "processing new tasks\n");
|
||||
|
||||
while (true) {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
if (!running) {
|
||||
QUE_DBG("%s", "terminate\n");
|
||||
return;
|
||||
}
|
||||
if (queue_tasks.empty()) {
|
||||
lock.unlock();
|
||||
break;
|
||||
}
|
||||
server_task task = std::move(queue_tasks.front());
|
||||
queue_tasks.pop_front();
|
||||
lock.unlock();
|
||||
|
||||
QUE_DBG("processing task, id = %d\n", task.id);
|
||||
callback_new_task(std::move(task));
|
||||
}
|
||||
// all tasks in the current loop is processed, slots data is now ready
|
||||
QUE_DBG("%s", "update slots\n");
|
||||
|
||||
// this will run the main inference process for all slots
|
||||
callback_update_slots();
|
||||
{
|
||||
// update_slots() may take a while to finish, we need to make sure it's not counted as idle
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
time_last_task = ggml_time_ms();
|
||||
}
|
||||
|
||||
QUE_DBG("%s", "waiting for new tasks\n");
|
||||
while (true) {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
if (!running || !queue_tasks.empty()) {
|
||||
break; // go back to process new tasks or terminate
|
||||
}
|
||||
|
||||
// no tasks, check for sleeping state
|
||||
if (should_sleep()) {
|
||||
QUE_INF("%s", "entering sleeping state\n");
|
||||
sleeping = true;
|
||||
callback_sleeping_state(true);
|
||||
req_stop_sleeping = false;
|
||||
// wait until we are requested to exit sleeping state
|
||||
condition_tasks.wait(lock, [&]{
|
||||
return (!running || req_stop_sleeping);
|
||||
});
|
||||
if (!running) { // may changed during sleep
|
||||
break; // terminate
|
||||
}
|
||||
QUE_INF("%s", "exiting sleeping state\n");
|
||||
req_stop_sleeping = false;
|
||||
callback_sleeping_state(false);
|
||||
sleeping = false;
|
||||
time_last_task = ggml_time_ms();
|
||||
condition_tasks.notify_all(); // notify wait_until_no_sleep()
|
||||
break; // process new tasks
|
||||
} else {
|
||||
// wait for new tasks or timeout for checking sleeping condition
|
||||
bool res = condition_tasks.wait_for(lock, max_wait_time, [&]{
|
||||
return (!queue_tasks.empty() || !running);
|
||||
});
|
||||
if (res) {
|
||||
break; // new task arrived or terminate
|
||||
}
|
||||
// otherwise, loop again to check sleeping condition
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void server_queue::cleanup_pending_task(int id_target) {
|
||||
// no need lock because this is called exclusively by post()
|
||||
auto rm_func = [id_target](const server_task & task) {
|
||||
return task.id == id_target;
|
||||
};
|
||||
queue_tasks.erase(
|
||||
std::remove_if(queue_tasks.begin(), queue_tasks.end(), rm_func),
|
||||
queue_tasks.end());
|
||||
queue_tasks_deferred.erase(
|
||||
std::remove_if(queue_tasks_deferred.begin(), queue_tasks_deferred.end(), rm_func),
|
||||
queue_tasks_deferred.end());
|
||||
}
|
||||
|
||||
//
|
||||
// server_response
|
||||
//
|
||||
|
||||
void server_response::add_waiting_task_id(int id_task) {
|
||||
RES_DBG("add task %d to waiting list. current waiting = %d (before add)\n", id_task, (int) waiting_task_ids.size());
|
||||
|
||||
std::unique_lock<std::mutex> lock(mutex_results);
|
||||
waiting_task_ids.insert(id_task);
|
||||
}
|
||||
|
||||
void server_response::add_waiting_task_ids(const std::unordered_set<int> & id_tasks) {
|
||||
std::unique_lock<std::mutex> lock(mutex_results);
|
||||
|
||||
for (const auto & id_task : id_tasks) {
|
||||
RES_DBG("add task %d to waiting list. current waiting = %d (before add)\n", id_task, (int) waiting_task_ids.size());
|
||||
waiting_task_ids.insert(id_task);
|
||||
}
|
||||
}
|
||||
|
||||
void server_response::remove_waiting_task_id(int id_task) {
|
||||
RES_DBG("remove task %d from waiting list. current waiting = %d (before remove)\n", id_task, (int) waiting_task_ids.size());
|
||||
|
||||
std::unique_lock<std::mutex> lock(mutex_results);
|
||||
waiting_task_ids.erase(id_task);
|
||||
// make sure to clean up all pending results
|
||||
queue_results.erase(
|
||||
std::remove_if(queue_results.begin(), queue_results.end(), [id_task](const server_task_result_ptr & res) {
|
||||
return res->id == id_task;
|
||||
}),
|
||||
queue_results.end());
|
||||
}
|
||||
|
||||
void server_response::remove_waiting_task_ids(const std::unordered_set<int> & id_tasks) {
|
||||
std::unique_lock<std::mutex> lock(mutex_results);
|
||||
|
||||
for (const auto & id_task : id_tasks) {
|
||||
RES_DBG("remove task %d from waiting list. current waiting = %d (before remove)\n", id_task, (int) waiting_task_ids.size());
|
||||
waiting_task_ids.erase(id_task);
|
||||
}
|
||||
}
|
||||
|
||||
server_task_result_ptr server_response::recv(const std::unordered_set<int> & id_tasks) {
|
||||
while (true) {
|
||||
std::unique_lock<std::mutex> lock(mutex_results);
|
||||
condition_results.wait(lock, [&]{
|
||||
if (!running) {
|
||||
RES_DBG("%s : queue result stop\n", "recv");
|
||||
std::terminate(); // we cannot return here since the caller is HTTP code
|
||||
}
|
||||
return !queue_results.empty();
|
||||
});
|
||||
|
||||
for (size_t i = 0; i < queue_results.size(); i++) {
|
||||
if (id_tasks.find(queue_results[i]->id) != id_tasks.end()) {
|
||||
server_task_result_ptr res = std::move(queue_results[i]);
|
||||
queue_results.erase(queue_results.begin() + i);
|
||||
return res;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// should never reach here
|
||||
}
|
||||
|
||||
server_task_result_ptr server_response::recv_with_timeout(const std::unordered_set<int> & id_tasks, int timeout) {
|
||||
while (true) {
|
||||
std::unique_lock<std::mutex> lock(mutex_results);
|
||||
|
||||
for (int i = 0; i < (int) queue_results.size(); i++) {
|
||||
if (id_tasks.find(queue_results[i]->id) != id_tasks.end()) {
|
||||
server_task_result_ptr res = std::move(queue_results[i]);
|
||||
queue_results.erase(queue_results.begin() + i);
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
std::cv_status cr_res = condition_results.wait_for(lock, std::chrono::seconds(timeout));
|
||||
if (!running) {
|
||||
RES_DBG("%s : queue result stop\n", __func__);
|
||||
std::terminate(); // we cannot return here since the caller is HTTP code
|
||||
}
|
||||
if (cr_res == std::cv_status::timeout) {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// should never reach here
|
||||
}
|
||||
|
||||
server_task_result_ptr server_response::recv(int id_task) {
|
||||
std::unordered_set<int> id_tasks = {id_task};
|
||||
return recv(id_tasks);
|
||||
}
|
||||
|
||||
void server_response::send(server_task_result_ptr && result) {
|
||||
RES_DBG("sending result for task id = %d\n", result->id);
|
||||
|
||||
std::unique_lock<std::mutex> lock(mutex_results);
|
||||
for (const auto & id_task : waiting_task_ids) {
|
||||
if (result->id == id_task) {
|
||||
RES_DBG("task id = %d pushed to result queue\n", result->id);
|
||||
|
||||
queue_results.emplace_back(std::move(result));
|
||||
condition_results.notify_all();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void server_response::terminate() {
|
||||
running = false;
|
||||
condition_results.notify_all();
|
||||
}
|
||||
|
||||
//
|
||||
// server_response_reader
|
||||
//
|
||||
|
||||
void server_response_reader::post_task(server_task && task, bool front) {
|
||||
GGML_ASSERT(id_tasks.empty() && "post_task() can only be called once per reader");
|
||||
GGML_ASSERT(!task.is_parent() && "not supported, use post_tasks() instead");
|
||||
task.index = 0;
|
||||
id_tasks.insert(task.id);
|
||||
states.push_back(task.create_state());
|
||||
queue_results.add_waiting_task_id(task.id);
|
||||
queue_tasks.post(std::move(task), front);
|
||||
}
|
||||
|
||||
void server_response_reader::post_tasks(std::vector<server_task> && tasks, bool front) {
|
||||
GGML_ASSERT(id_tasks.empty() && "post_tasks() can only be called once per reader");
|
||||
id_tasks = server_task::get_list_id(tasks);
|
||||
states.reserve(tasks.size());
|
||||
size_t index = 0;
|
||||
for (auto & task : tasks) {
|
||||
task.index = index++;
|
||||
states.push_back(task.create_state());
|
||||
// for child tasks
|
||||
for (auto & child_task : task.child_tasks) {
|
||||
child_task.index = index++;
|
||||
states.push_back(child_task.create_state());
|
||||
}
|
||||
}
|
||||
GGML_ASSERT(states.size() == id_tasks.size());
|
||||
queue_results.add_waiting_task_ids(id_tasks);
|
||||
queue_tasks.post(std::move(tasks), front);
|
||||
}
|
||||
|
||||
bool server_response_reader::has_next() const {
|
||||
return !cancelled && received_count < id_tasks.size();
|
||||
}
|
||||
|
||||
// return nullptr if should_stop() is true before receiving a result
|
||||
// note: if one error is received, it will stop further processing and return error result
|
||||
server_task_result_ptr server_response_reader::next(const std::function<bool()> & should_stop) {
|
||||
while (true) {
|
||||
server_task_result_ptr result = queue_results.recv_with_timeout(id_tasks, polling_interval_seconds);
|
||||
if (result == nullptr) {
|
||||
// timeout, check stop condition
|
||||
if (should_stop()) {
|
||||
SRV_DBG("%s", "stopping wait for next result due to should_stop condition\n");
|
||||
return nullptr;
|
||||
}
|
||||
} else {
|
||||
if (result->is_error()) {
|
||||
stop(); // cancel remaining tasks
|
||||
SRV_DBG("%s", "received error result, stopping further processing\n");
|
||||
return result;
|
||||
}
|
||||
if (!states.empty()) {
|
||||
// update the generation state if needed
|
||||
const size_t idx = result->index;
|
||||
GGML_ASSERT(idx < states.size());
|
||||
result->update(states[idx]);
|
||||
}
|
||||
if (result->is_stop()) {
|
||||
received_count++;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
// should not reach here
|
||||
}
|
||||
|
||||
server_response_reader::batch_response server_response_reader::wait_for_all(const std::function<bool()> & should_stop) {
|
||||
batch_response batch_res;
|
||||
batch_res.results.clear();
|
||||
batch_res.results.resize(id_tasks.size());
|
||||
while (has_next()) {
|
||||
auto res = next(should_stop);
|
||||
if (res == nullptr) {
|
||||
batch_res.is_terminated = true;
|
||||
return batch_res;
|
||||
}
|
||||
if (res->is_error()) {
|
||||
batch_res.error = std::move(res);
|
||||
return batch_res;
|
||||
}
|
||||
const size_t idx = res->index;
|
||||
GGML_ASSERT(idx < batch_res.results.size() && "index out of range");
|
||||
GGML_ASSERT(batch_res.results[idx] == nullptr && "duplicate result received");
|
||||
batch_res.results[idx] = std::move(res);
|
||||
}
|
||||
return batch_res;
|
||||
}
|
||||
|
||||
void server_response_reader::stop() {
|
||||
queue_results.remove_waiting_task_ids(id_tasks);
|
||||
if (has_next() && !cancelled) {
|
||||
// if tasks is not finished yet, cancel them
|
||||
cancelled = true;
|
||||
std::vector<server_task> cancel_tasks;
|
||||
cancel_tasks.reserve(id_tasks.size());
|
||||
for (const auto & id_task : id_tasks) {
|
||||
SRV_WRN("cancel task, id_task = %d\n", id_task);
|
||||
server_task task(SERVER_TASK_TYPE_CANCEL);
|
||||
task.id_target = id_task;
|
||||
queue_results.remove_waiting_task_id(id_task);
|
||||
cancel_tasks.push_back(std::move(task));
|
||||
}
|
||||
// push to beginning of the queue, so it has highest priority
|
||||
queue_tasks.post(std::move(cancel_tasks), true);
|
||||
} else {
|
||||
SRV_DBG("%s", "all tasks already finished, no need to cancel\n");
|
||||
}
|
||||
}
|
||||
197
tools/server/server-queue.h
Normal file
@@ -0,0 +1,197 @@
|
||||
#pragma once
|
||||
|
||||
#include "server-task.h"
|
||||
|
||||
#include <condition_variable>
|
||||
#include <deque>
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
#include <unordered_set>
|
||||
|
||||
// struct for managing server tasks
|
||||
// in most cases, use server_response_reader to post new tasks and retrieve results
|
||||
struct server_queue {
|
||||
private:
|
||||
int id = 0;
|
||||
bool running = false;
|
||||
bool sleeping = false;
|
||||
bool req_stop_sleeping = false;
|
||||
int64_t time_last_task = 0;
|
||||
|
||||
// queues
|
||||
std::deque<server_task> queue_tasks;
|
||||
std::deque<server_task> queue_tasks_deferred;
|
||||
|
||||
std::mutex mutex_tasks;
|
||||
std::condition_variable condition_tasks;
|
||||
|
||||
// callback functions
|
||||
std::function<void(server_task &&)> callback_new_task;
|
||||
std::function<void(void)> callback_update_slots;
|
||||
std::function<void(bool)> callback_sleeping_state;
|
||||
|
||||
public:
|
||||
// Add a new task to the end of the queue
|
||||
int post(server_task && task, bool front = false);
|
||||
|
||||
// multi-task version of post()
|
||||
int post(std::vector<server_task> && tasks, bool front = false);
|
||||
|
||||
// Add a new task, but defer until one slot is available
|
||||
void defer(server_task && task);
|
||||
|
||||
// Get the next id for creating a new task
|
||||
int get_new_id();
|
||||
|
||||
// Call when the state of one slot is changed, it will move one task from deferred to main queue
|
||||
// prioritize tasks that use the specified slot (otherwise, pop the first deferred task)
|
||||
void pop_deferred_task(int id_slot);
|
||||
|
||||
// if sleeping, request exiting sleep state and wait until it is done
|
||||
// returns immediately if not sleeping
|
||||
void wait_until_no_sleep();
|
||||
|
||||
bool is_sleeping() {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
return sleeping;
|
||||
}
|
||||
|
||||
// end the start_loop routine
|
||||
void terminate();
|
||||
|
||||
/**
|
||||
* Main loop consists of these steps:
|
||||
* - Wait until a new task arrives
|
||||
* - Process the task (i.e. maybe copy data into slot)
|
||||
* - Check if multitask is finished
|
||||
* - Update all slots
|
||||
*
|
||||
* Sleeping procedure (disabled if idle_sleep_ms < 0):
|
||||
* - If there is no task after idle_sleep_ms, enter sleeping state
|
||||
* - Call callback_sleeping_state(true)
|
||||
* - Wait until req_stop_sleeping is set to true
|
||||
* - Call callback_sleeping_state(false)
|
||||
* - Exit sleeping state
|
||||
*/
|
||||
void start_loop(int64_t idle_sleep_ms = -1);
|
||||
|
||||
// for metrics
|
||||
size_t queue_tasks_deferred_size() {
|
||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||
return queue_tasks_deferred.size();
|
||||
}
|
||||
|
||||
//
|
||||
// Functions below are not thread-safe, must only be used before start_loop() is called
|
||||
//
|
||||
|
||||
// Register function to process a new task
|
||||
void on_new_task(std::function<void(server_task &&)> callback) {
|
||||
callback_new_task = std::move(callback);
|
||||
}
|
||||
|
||||
// Register the function to be called when all slots data is ready to be processed
|
||||
void on_update_slots(std::function<void(void)> callback) {
|
||||
callback_update_slots = std::move(callback);
|
||||
}
|
||||
|
||||
// Register callback for sleeping state change
|
||||
// note: when entering sleeping state, the callback is called AFTER sleeping is set to true
|
||||
// when leaving sleeping state, the callback is called BEFORE sleeping is set to false
|
||||
void on_sleeping_state(std::function<void(bool)> callback) {
|
||||
callback_sleeping_state = std::move(callback);
|
||||
}
|
||||
|
||||
private:
|
||||
void cleanup_pending_task(int id_target);
|
||||
};
|
||||
|
||||
// struct for managing server responses
|
||||
// in most cases, use server_response_reader to retrieve results
|
||||
struct server_response {
|
||||
private:
|
||||
bool running = true;
|
||||
|
||||
// for keeping track of all tasks waiting for the result
|
||||
std::unordered_set<int> waiting_task_ids;
|
||||
|
||||
// the main result queue (using ptr for polymorphism)
|
||||
std::vector<server_task_result_ptr> queue_results;
|
||||
|
||||
std::mutex mutex_results;
|
||||
std::condition_variable condition_results;
|
||||
|
||||
public:
|
||||
// add the id_task to the list of tasks waiting for response
|
||||
void add_waiting_task_id(int id_task);
|
||||
|
||||
void add_waiting_task_ids(const std::unordered_set<int> & id_tasks);
|
||||
|
||||
// when the request is finished, we can remove task associated with it
|
||||
void remove_waiting_task_id(int id_task);
|
||||
|
||||
// remove multiple tasks from waiting list
|
||||
void remove_waiting_task_ids(const std::unordered_set<int> & id_tasks);
|
||||
|
||||
// This function blocks the thread until there is a response for one of the id_tasks
|
||||
server_task_result_ptr recv(const std::unordered_set<int> & id_tasks);
|
||||
|
||||
// same as recv(), but have timeout in seconds
|
||||
// if timeout is reached, nullptr is returned
|
||||
server_task_result_ptr recv_with_timeout(const std::unordered_set<int> & id_tasks, int timeout);
|
||||
|
||||
// single-task version of recv()
|
||||
server_task_result_ptr recv(int id_task);
|
||||
|
||||
// Send a new result to a waiting id_task
|
||||
void send(server_task_result_ptr && result);
|
||||
|
||||
// terminate the waiting loop
|
||||
void terminate();
|
||||
};
|
||||
|
||||
// utility class to make working with server_queue and server_response easier
|
||||
// it provides a generator-like API for server responses
|
||||
// support pooling connection state and aggregating multiple results
|
||||
struct server_response_reader {
|
||||
std::unordered_set<int> id_tasks;
|
||||
server_queue & queue_tasks;
|
||||
server_response & queue_results;
|
||||
size_t received_count = 0;
|
||||
bool cancelled = false;
|
||||
int polling_interval_seconds;
|
||||
|
||||
// tracking generation state and partial tool calls
|
||||
// only used by streaming completions
|
||||
std::vector<task_result_state> states;
|
||||
|
||||
// should_stop function will be called each polling_interval_seconds
|
||||
server_response_reader(server_queue & queue_tasks, server_response & queue_results, int polling_interval_seconds)
|
||||
: queue_tasks(queue_tasks), queue_results(queue_results), polling_interval_seconds(polling_interval_seconds) {}
|
||||
~server_response_reader() {
|
||||
stop();
|
||||
}
|
||||
|
||||
int get_new_id() {
|
||||
return queue_tasks.get_new_id();
|
||||
}
|
||||
|
||||
// if front = true, the task will be posted to the front of the queue (high priority)
|
||||
void post_task(server_task && task, bool front = false);
|
||||
void post_tasks(std::vector<server_task> && tasks, bool front = false);
|
||||
bool has_next() const;
|
||||
|
||||
// return nullptr if should_stop() is true before receiving a result
|
||||
// note: if one error is received, it will stop further processing and return error result
|
||||
server_task_result_ptr next(const std::function<bool()> & should_stop);
|
||||
|
||||
struct batch_response {
|
||||
bool is_terminated = false; // if true, indicates that processing was stopped before all results were received
|
||||
std::vector<server_task_result_ptr> results;
|
||||
server_task_result_ptr error; // nullptr if no error
|
||||
};
|
||||
// aggregate multiple results
|
||||
batch_response wait_for_all(const std::function<bool()> & should_stop);
|
||||
|
||||
void stop();
|
||||
};
|
||||
1643
tools/server/server-task.cpp
Normal file
602
tools/server/server-task.h
Normal file
@@ -0,0 +1,602 @@
|
||||
#pragma once
|
||||
|
||||
#include "common.h"
|
||||
#include "llama.h"
|
||||
|
||||
#include <string>
|
||||
#include <unordered_set>
|
||||
#include <list>
|
||||
#include <map>
|
||||
|
||||
// TODO: prevent including the whole server-common.h as we only use server_tokens
|
||||
#include "server-common.h"
|
||||
|
||||
using json = nlohmann::ordered_json;
|
||||
|
||||
enum server_task_type {
|
||||
SERVER_TASK_TYPE_COMPLETION,
|
||||
SERVER_TASK_TYPE_EMBEDDING,
|
||||
SERVER_TASK_TYPE_RERANK,
|
||||
SERVER_TASK_TYPE_INFILL,
|
||||
SERVER_TASK_TYPE_CANCEL,
|
||||
SERVER_TASK_TYPE_NEXT_RESPONSE,
|
||||
SERVER_TASK_TYPE_METRICS,
|
||||
SERVER_TASK_TYPE_SLOT_SAVE,
|
||||
SERVER_TASK_TYPE_SLOT_RESTORE,
|
||||
SERVER_TASK_TYPE_SLOT_ERASE,
|
||||
SERVER_TASK_TYPE_GET_LORA,
|
||||
SERVER_TASK_TYPE_SET_LORA,
|
||||
};
|
||||
|
||||
// TODO: change this to more generic "response_format" to replace the "format_response_*" in server-common
|
||||
enum task_response_type {
|
||||
TASK_RESPONSE_TYPE_NONE, // llama.cpp native format
|
||||
TASK_RESPONSE_TYPE_OAI_CHAT,
|
||||
TASK_RESPONSE_TYPE_OAI_CMPL,
|
||||
TASK_RESPONSE_TYPE_OAI_EMBD,
|
||||
TASK_RESPONSE_TYPE_ANTHROPIC,
|
||||
};
|
||||
|
||||
enum stop_type {
|
||||
STOP_TYPE_NONE,
|
||||
STOP_TYPE_EOS,
|
||||
STOP_TYPE_WORD,
|
||||
STOP_TYPE_LIMIT,
|
||||
};
|
||||
|
||||
struct task_params {
|
||||
bool stream = true;
|
||||
bool include_usage = false;
|
||||
bool cache_prompt = true; // remember the prompt to avoid reprocessing all prompt
|
||||
bool return_tokens = false;
|
||||
bool return_progress = false;
|
||||
|
||||
int32_t n_keep = 0; // number of tokens to keep from initial prompt
|
||||
int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
|
||||
int32_t n_predict = -1; // new tokens to predict
|
||||
int32_t n_indent = 0; // minimum line indentation for the generated text in number of whitespace characters
|
||||
int32_t n_cmpl = 1; // number of completions to generate from this prompt
|
||||
|
||||
int32_t n_cache_reuse = 0; // min chunk size to attempt reusing from the cache via KV shifting (0 = disabled)
|
||||
|
||||
int64_t t_max_prompt_ms = -1; // TODO: implement
|
||||
int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit
|
||||
|
||||
std::map<int, float> lora; // mapping adapter ID -> scale
|
||||
|
||||
std::vector<std::string> antiprompt;
|
||||
std::vector<std::string> response_fields;
|
||||
|
||||
bool timings_per_token = false;
|
||||
bool post_sampling_probs = false;
|
||||
|
||||
struct common_params_sampling sampling;
|
||||
struct common_params_speculative speculative;
|
||||
|
||||
// response formatting
|
||||
bool verbose = false;
|
||||
task_response_type res_type = TASK_RESPONSE_TYPE_NONE;
|
||||
std::string oaicompat_model;
|
||||
std::string oaicompat_cmpl_id;
|
||||
common_chat_syntax oaicompat_chat_syntax;
|
||||
|
||||
// Embeddings
|
||||
int32_t embd_normalize = 2; // (-1=none, 0=max absolute int16, 1=taxicab, 2=Euclidean/L2, >2=p-norm)
|
||||
|
||||
json format_logit_bias(const std::vector<llama_logit_bias> & logit_bias) const;
|
||||
json to_json(bool only_metrics = false) const;
|
||||
};
|
||||
|
||||
// struct for tracking the state of a task (e.g., for streaming)
|
||||
struct task_result_state {
|
||||
// tracking diffs for partial tool calls
|
||||
std::vector<common_chat_msg_diff> diffs;
|
||||
common_chat_syntax oaicompat_chat_syntax;
|
||||
common_chat_msg chat_msg;
|
||||
std::string generated_text; // append new chunks of generated text here
|
||||
std::vector<std::string> generated_tool_call_ids;
|
||||
|
||||
// for Anthropic API streaming: track content block state across chunks
|
||||
bool anthropic_thinking_block_started = false;
|
||||
bool anthropic_text_block_started = false;
|
||||
|
||||
task_result_state(const common_chat_syntax & oaicompat_chat_syntax)
|
||||
: oaicompat_chat_syntax(oaicompat_chat_syntax) {}
|
||||
|
||||
// parse partial tool calls and update the internal state
|
||||
common_chat_msg update_chat_msg(
|
||||
const std::string & text_added,
|
||||
bool is_partial,
|
||||
std::vector<common_chat_msg_diff> & diffs);
|
||||
};
|
||||
|
||||
struct server_task {
|
||||
int id = -1; // to be filled by server_queue
|
||||
|
||||
// TODO @ngxson : remove this field and implement a mapping task_id -> idx in the response_reader
|
||||
size_t index = 0; // used when there are multiple prompts (batch request)
|
||||
|
||||
// used by SERVER_TASK_TYPE_CANCEL
|
||||
int id_target = -1;
|
||||
int id_slot = -1;
|
||||
|
||||
// used by parallel sampling (multiple completions from same prompt)
|
||||
int id_parent = -1;
|
||||
// temporary store of child tasks for scheduling
|
||||
// note: accessing to elements is invalid after the task is moved to server_slot
|
||||
std::vector<server_task> child_tasks;
|
||||
|
||||
// used by SERVER_TASK_TYPE_INFERENCE
|
||||
task_params params;
|
||||
server_tokens tokens;
|
||||
|
||||
// only used by CLI, this delegates the tokenization to the server
|
||||
json cli_input = nullptr;
|
||||
std::vector<raw_buffer> cli_files;
|
||||
|
||||
server_task_type type;
|
||||
|
||||
// used by SERVER_TASK_TYPE_SLOT_SAVE, SERVER_TASK_TYPE_SLOT_RESTORE, SERVER_TASK_TYPE_SLOT_ERASE
|
||||
struct slot_action {
|
||||
int slot_id;
|
||||
std::string filename;
|
||||
std::string filepath;
|
||||
};
|
||||
slot_action slot_action;
|
||||
|
||||
// used by SERVER_TASK_TYPE_METRICS
|
||||
bool metrics_reset_bucket = false;
|
||||
|
||||
// used by SERVER_TASK_TYPE_SET_LORA
|
||||
std::map<int, float> set_lora; // mapping adapter ID -> scale
|
||||
|
||||
server_task() = default;
|
||||
|
||||
server_task(server_task_type type) : type(type) {}
|
||||
|
||||
int32_t n_tokens() const {
|
||||
return tokens.size();
|
||||
}
|
||||
|
||||
bool need_embd() const {
|
||||
switch (type) {
|
||||
case SERVER_TASK_TYPE_EMBEDDING:
|
||||
case SERVER_TASK_TYPE_RERANK:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool need_logits() const {
|
||||
switch (type) {
|
||||
case SERVER_TASK_TYPE_COMPLETION:
|
||||
case SERVER_TASK_TYPE_INFILL:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool need_sampling() const {
|
||||
switch (type) {
|
||||
case SERVER_TASK_TYPE_COMPLETION:
|
||||
case SERVER_TASK_TYPE_INFILL:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static task_params params_from_json_cmpl(
|
||||
const llama_vocab * vocab,
|
||||
const common_params & params_base,
|
||||
const int n_ctx_slot,
|
||||
const json & data);
|
||||
|
||||
// utility function
|
||||
static std::unordered_set<int> get_list_id(const std::vector<server_task> & tasks) {
|
||||
std::unordered_set<int> ids(tasks.size());
|
||||
for (size_t i = 0; i < tasks.size(); i++) {
|
||||
ids.insert(tasks[i].id);
|
||||
for (auto & child : tasks[i].child_tasks) {
|
||||
ids.insert(child.id);
|
||||
}
|
||||
}
|
||||
return ids;
|
||||
}
|
||||
|
||||
void add_child(int id_parent, int id_child) {
|
||||
server_task copy;
|
||||
|
||||
copy.id = id_child;
|
||||
copy.id_parent = id_parent;
|
||||
copy.params = params;
|
||||
copy.type = type;
|
||||
copy.tokens = tokens.clone();
|
||||
copy.id_slot = -1; // child tasks cannot specify slot
|
||||
|
||||
// use different sampling seed for each child
|
||||
// note: https://github.com/ggml-org/llama.cpp/pull/18700#discussion_r2675115723
|
||||
if (copy.params.sampling.seed != LLAMA_DEFAULT_SEED) {
|
||||
copy.params.sampling.seed += (uint32_t)child_tasks.size() + 1;
|
||||
}
|
||||
|
||||
child_tasks.push_back(std::move(copy));
|
||||
}
|
||||
|
||||
// the task will be moved into queue, then onto slots
|
||||
// however, the state must be kept by caller (e.g., HTTP thread)
|
||||
task_result_state create_state() const {
|
||||
return task_result_state(params.oaicompat_chat_syntax);
|
||||
}
|
||||
|
||||
bool is_parent() const {
|
||||
return child_tasks.size() > 0;
|
||||
}
|
||||
|
||||
bool is_child() const {
|
||||
return id_parent != -1;
|
||||
}
|
||||
};
|
||||
|
||||
struct result_timings {
|
||||
int32_t cache_n = -1;
|
||||
|
||||
int32_t prompt_n = -1;
|
||||
double prompt_ms;
|
||||
double prompt_per_token_ms;
|
||||
double prompt_per_second;
|
||||
|
||||
int32_t predicted_n = -1;
|
||||
double predicted_ms;
|
||||
double predicted_per_token_ms;
|
||||
double predicted_per_second;
|
||||
|
||||
// Optional speculative metrics - only included when > 0
|
||||
int32_t draft_n = 0;
|
||||
int32_t draft_n_accepted = 0;
|
||||
|
||||
json to_json() const;
|
||||
};
|
||||
|
||||
struct result_prompt_progress {
|
||||
int32_t total = 0;
|
||||
int32_t cache = 0;
|
||||
int32_t processed = 0;
|
||||
int64_t time_ms = 0;
|
||||
|
||||
json to_json() const;
|
||||
};
|
||||
|
||||
struct server_task_result {
|
||||
int id = -1;
|
||||
int id_slot = -1;
|
||||
|
||||
// TODO @ngxson : remove this field and implement a mapping task_id -> idx in the response_reader
|
||||
size_t index = 0; // to be used for batched tasks
|
||||
|
||||
virtual bool is_error() {
|
||||
// only used by server_task_result_error
|
||||
return false;
|
||||
}
|
||||
virtual bool is_stop() {
|
||||
// only used by server_task_result_cmpl_*
|
||||
return true;
|
||||
}
|
||||
virtual void update(task_result_state &) {
|
||||
// only used by server_task_result_cmpl_*
|
||||
}
|
||||
virtual json to_json() = 0;
|
||||
virtual ~server_task_result() = default;
|
||||
};
|
||||
|
||||
// using shared_ptr for polymorphism of server_task_result
|
||||
using server_task_result_ptr = std::unique_ptr<server_task_result>;
|
||||
|
||||
struct completion_token_output {
|
||||
llama_token tok;
|
||||
float prob;
|
||||
std::string text_to_send;
|
||||
struct prob_info {
|
||||
llama_token tok;
|
||||
std::string txt;
|
||||
float prob;
|
||||
};
|
||||
std::vector<prob_info> probs;
|
||||
|
||||
json to_json(bool post_sampling_probs) const;
|
||||
|
||||
static json probs_vector_to_json(const std::vector<completion_token_output> & probs, bool post_sampling_probs);
|
||||
|
||||
static float logarithm(float x);
|
||||
|
||||
static std::vector<unsigned char> str_to_bytes(const std::string & str);
|
||||
|
||||
};
|
||||
|
||||
struct server_task_result_cmpl_final : server_task_result {
|
||||
std::string content;
|
||||
llama_tokens tokens;
|
||||
|
||||
bool stream;
|
||||
bool include_usage;
|
||||
result_timings timings;
|
||||
std::string prompt;
|
||||
|
||||
bool truncated;
|
||||
int32_t n_decoded;
|
||||
int32_t n_prompt_tokens;
|
||||
int32_t n_tokens_cached;
|
||||
bool has_new_line;
|
||||
std::string stopping_word;
|
||||
stop_type stop = STOP_TYPE_NONE;
|
||||
|
||||
bool post_sampling_probs;
|
||||
std::vector<completion_token_output> probs_output;
|
||||
std::vector<std::string> response_fields;
|
||||
|
||||
task_params generation_params;
|
||||
|
||||
// response formatting
|
||||
bool verbose = false;
|
||||
task_response_type res_type = TASK_RESPONSE_TYPE_NONE;
|
||||
std::string oaicompat_model;
|
||||
std::string oaicompat_cmpl_id;
|
||||
common_chat_msg oaicompat_msg; // to be populated by update()
|
||||
|
||||
std::vector<common_chat_msg_diff> oaicompat_msg_diffs; // to be populated by update()
|
||||
bool is_updated = false;
|
||||
|
||||
virtual bool is_stop() override {
|
||||
return true; // in stream mode, final responses are considered stop
|
||||
}
|
||||
|
||||
virtual json to_json() override;
|
||||
|
||||
virtual void update(task_result_state & state) override {
|
||||
is_updated = true;
|
||||
oaicompat_msg = state.update_chat_msg(content, false, oaicompat_msg_diffs);
|
||||
}
|
||||
|
||||
json to_json_non_oaicompat();
|
||||
|
||||
json to_json_oaicompat();
|
||||
|
||||
json to_json_oaicompat_chat();
|
||||
|
||||
json to_json_oaicompat_chat_stream();
|
||||
|
||||
json to_json_anthropic();
|
||||
|
||||
json to_json_anthropic_stream();
|
||||
};
|
||||
|
||||
struct server_task_result_cmpl_partial : server_task_result {
|
||||
std::string content;
|
||||
llama_tokens tokens;
|
||||
|
||||
int32_t n_decoded;
|
||||
int32_t n_prompt_tokens;
|
||||
|
||||
bool post_sampling_probs;
|
||||
bool is_progress = false;
|
||||
completion_token_output prob_output;
|
||||
result_timings timings;
|
||||
result_prompt_progress progress;
|
||||
|
||||
// response formatting
|
||||
bool verbose = false;
|
||||
task_response_type res_type = TASK_RESPONSE_TYPE_NONE;
|
||||
std::string oaicompat_model;
|
||||
std::string oaicompat_cmpl_id;
|
||||
std::vector<common_chat_msg_diff> oaicompat_msg_diffs; // to be populated by update()
|
||||
bool is_updated = false;
|
||||
|
||||
// for Anthropic API: track if any reasoning content has been generated
|
||||
bool anthropic_has_reasoning = false;
|
||||
// Streaming state copied from task_result_state for this chunk
|
||||
bool anthropic_thinking_block_started = false;
|
||||
bool anthropic_text_block_started = false;
|
||||
|
||||
virtual bool is_stop() override {
|
||||
return false; // in stream mode, partial responses are not considered stop
|
||||
}
|
||||
|
||||
virtual json to_json() override;
|
||||
|
||||
virtual void update(task_result_state & state) override {
|
||||
is_updated = true;
|
||||
state.update_chat_msg(content, true, oaicompat_msg_diffs);
|
||||
// track if the accumulated message has any reasoning content
|
||||
anthropic_has_reasoning = !state.chat_msg.reasoning_content.empty();
|
||||
|
||||
// Copy current state for use in to_json_anthropic() (reflects state BEFORE this chunk)
|
||||
anthropic_thinking_block_started = state.anthropic_thinking_block_started;
|
||||
anthropic_text_block_started = state.anthropic_text_block_started;
|
||||
|
||||
// Pre-compute state updates based on diffs (for next chunk)
|
||||
for (const auto & diff : oaicompat_msg_diffs) {
|
||||
if (!diff.reasoning_content_delta.empty() && !state.anthropic_thinking_block_started) {
|
||||
state.anthropic_thinking_block_started = true;
|
||||
}
|
||||
if (!diff.content_delta.empty() && !state.anthropic_text_block_started) {
|
||||
state.anthropic_text_block_started = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
json to_json_non_oaicompat();
|
||||
|
||||
json to_json_oaicompat();
|
||||
|
||||
json to_json_oaicompat_chat();
|
||||
|
||||
json to_json_anthropic();
|
||||
};
|
||||
|
||||
struct server_task_result_embd : server_task_result {
|
||||
std::vector<std::vector<float>> embedding;
|
||||
|
||||
int32_t n_tokens;
|
||||
|
||||
// response formatting
|
||||
task_response_type res_type = TASK_RESPONSE_TYPE_NONE;
|
||||
|
||||
virtual json to_json() override;
|
||||
|
||||
json to_json_non_oaicompat();
|
||||
|
||||
json to_json_oaicompat();
|
||||
};
|
||||
|
||||
struct server_task_result_rerank : server_task_result {
|
||||
float score = -1e6;
|
||||
|
||||
int32_t n_tokens;
|
||||
|
||||
virtual json to_json() override;
|
||||
};
|
||||
|
||||
struct server_task_result_error : server_task_result {
|
||||
error_type err_type = ERROR_TYPE_SERVER;
|
||||
std::string err_msg;
|
||||
|
||||
// for ERROR_TYPE_EXCEED_CONTEXT_SIZE
|
||||
int32_t n_prompt_tokens = 0;
|
||||
int32_t n_ctx = 0;
|
||||
|
||||
virtual bool is_error() override {
|
||||
return true;
|
||||
}
|
||||
|
||||
virtual json to_json() override;
|
||||
};
|
||||
|
||||
struct server_task_result_metrics : server_task_result {
|
||||
int n_idle_slots;
|
||||
int n_processing_slots;
|
||||
int n_tasks_deferred;
|
||||
int64_t t_start;
|
||||
|
||||
// TODO: somehow reuse server_metrics in the future, instead of duplicating the fields
|
||||
uint64_t n_prompt_tokens_processed_total = 0;
|
||||
uint64_t t_prompt_processing_total = 0;
|
||||
uint64_t n_tokens_predicted_total = 0;
|
||||
uint64_t t_tokens_generation_total = 0;
|
||||
|
||||
uint64_t n_tokens_max = 0;
|
||||
|
||||
uint64_t n_prompt_tokens_processed = 0;
|
||||
uint64_t t_prompt_processing = 0;
|
||||
|
||||
uint64_t n_tokens_predicted = 0;
|
||||
uint64_t t_tokens_generation = 0;
|
||||
|
||||
uint64_t n_decode_total = 0;
|
||||
uint64_t n_busy_slots_total = 0;
|
||||
|
||||
// while we can also use std::vector<server_slot> this requires copying the slot object which can be quite messy
|
||||
// therefore, we use json to temporarily store the slot.to_json() result
|
||||
json slots_data = json::array();
|
||||
|
||||
virtual json to_json() override;
|
||||
};
|
||||
|
||||
struct server_task_result_slot_save_load : server_task_result {
|
||||
std::string filename;
|
||||
bool is_save; // true = save, false = load
|
||||
|
||||
size_t n_tokens;
|
||||
size_t n_bytes;
|
||||
double t_ms;
|
||||
|
||||
virtual json to_json() override;
|
||||
};
|
||||
|
||||
struct server_task_result_slot_erase : server_task_result {
|
||||
size_t n_erased;
|
||||
|
||||
virtual json to_json() override;
|
||||
};
|
||||
|
||||
struct server_task_result_get_lora : server_task_result {
|
||||
struct lora {
|
||||
common_adapter_lora_info info;
|
||||
std::string alora_invocation_string;
|
||||
llama_tokens alora_invocation_tokens;
|
||||
};
|
||||
std::vector<lora> loras;
|
||||
|
||||
virtual json to_json() override;
|
||||
};
|
||||
|
||||
struct server_task_result_apply_lora : server_task_result {
|
||||
virtual json to_json() override;
|
||||
};
|
||||
|
||||
struct server_prompt_checkpoint {
|
||||
llama_pos pos_min;
|
||||
llama_pos pos_max;
|
||||
|
||||
std::vector<uint8_t> data;
|
||||
|
||||
size_t size() const {
|
||||
return data.size();
|
||||
}
|
||||
};
|
||||
|
||||
struct server_prompt {
|
||||
server_tokens tokens;
|
||||
|
||||
std::vector<uint8_t> data;
|
||||
|
||||
std::list<server_prompt_checkpoint> checkpoints;
|
||||
|
||||
size_t size() const {
|
||||
size_t res = data.size();
|
||||
|
||||
for (const auto & checkpoint : checkpoints) {
|
||||
res += checkpoint.size();
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
int n_tokens() const {
|
||||
return tokens.size();
|
||||
}
|
||||
|
||||
server_prompt clone() const {
|
||||
return server_prompt {
|
||||
tokens.clone(),
|
||||
data,
|
||||
checkpoints
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
struct server_prompt_cache {
|
||||
server_prompt_cache(int32_t limit_size_mib, size_t limit_tokens) {
|
||||
this->limit_size = 1024ull*1024ull*(limit_size_mib < 0 ? 0 : limit_size_mib);
|
||||
this->limit_tokens = limit_tokens;
|
||||
}
|
||||
|
||||
std::list<server_prompt> states;
|
||||
|
||||
// in bytes, 0 = no limit
|
||||
size_t limit_size = 0;
|
||||
|
||||
// in tokens, 0 = no limit
|
||||
size_t limit_tokens = 0;
|
||||
|
||||
size_t size() const;
|
||||
|
||||
size_t n_tokens() const;
|
||||
|
||||
server_prompt * alloc(const server_prompt & prompt, size_t state_size);
|
||||
|
||||
bool load(server_prompt & prompt, const server_tokens & tokens_new, llama_context * ctx, int32_t id_slot);
|
||||
|
||||
void update();
|
||||
};
|
||||
320
tools/server/server.cpp
Normal file
@@ -0,0 +1,320 @@
|
||||
#include "server-context.h"
|
||||
#include "server-http.h"
|
||||
#include "server-models.h"
|
||||
|
||||
#include "arg.h"
|
||||
#include "common.h"
|
||||
#include "llama.h"
|
||||
#include "log.h"
|
||||
|
||||
#include <atomic>
|
||||
#include <exception>
|
||||
#include <signal.h>
|
||||
#include <thread> // for std::thread::hardware_concurrency
|
||||
|
||||
#if defined(_WIN32)
|
||||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
static std::function<void(int)> shutdown_handler;
|
||||
static std::atomic_flag is_terminating = ATOMIC_FLAG_INIT;
|
||||
|
||||
static inline void signal_handler(int signal) {
|
||||
if (is_terminating.test_and_set()) {
|
||||
// in case it hangs, we can force terminate the server by hitting Ctrl+C twice
|
||||
// this is for better developer experience, we can remove when the server is stable enough
|
||||
fprintf(stderr, "Received second interrupt, terminating immediately.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
shutdown_handler(signal);
|
||||
}
|
||||
|
||||
// wrapper function that handles exceptions and logs errors
|
||||
// this is to make sure handler_t never throws exceptions; instead, it returns an error response
|
||||
static server_http_context::handler_t ex_wrapper(server_http_context::handler_t func) {
|
||||
return [func = std::move(func)](const server_http_req & req) -> server_http_res_ptr {
|
||||
std::string message;
|
||||
error_type error;
|
||||
try {
|
||||
return func(req);
|
||||
} catch (const std::invalid_argument & e) {
|
||||
// treat invalid_argument as invalid request (400)
|
||||
error = ERROR_TYPE_INVALID_REQUEST;
|
||||
message = e.what();
|
||||
} catch (const std::exception & e) {
|
||||
// treat other exceptions as server error (500)
|
||||
error = ERROR_TYPE_SERVER;
|
||||
message = e.what();
|
||||
} catch (...) {
|
||||
error = ERROR_TYPE_SERVER;
|
||||
message = "unknown error";
|
||||
}
|
||||
|
||||
auto res = std::make_unique<server_http_res>();
|
||||
res->status = 500;
|
||||
try {
|
||||
json error_data = format_error_response(message, error);
|
||||
res->status = json_value(error_data, "code", 500);
|
||||
res->data = safe_json_to_str({{ "error", error_data }});
|
||||
SRV_WRN("got exception: %s\n", res->data.c_str());
|
||||
} catch (const std::exception & e) {
|
||||
SRV_ERR("got another exception: %s | while handling exception: %s\n", e.what(), message.c_str());
|
||||
res->data = "Internal Server Error";
|
||||
}
|
||||
return res;
|
||||
};
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
// own arguments required by this example
|
||||
common_params params;
|
||||
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SERVER)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
// validate batch size for embeddings
|
||||
// embeddings require all tokens to be processed in a single ubatch
|
||||
// see https://github.com/ggml-org/llama.cpp/issues/12836
|
||||
if (params.embedding && params.n_batch > params.n_ubatch) {
|
||||
LOG_WRN("%s: embeddings enabled with n_batch (%d) > n_ubatch (%d)\n", __func__, params.n_batch, params.n_ubatch);
|
||||
LOG_WRN("%s: setting n_batch = n_ubatch = %d to avoid assertion failure\n", __func__, params.n_ubatch);
|
||||
params.n_batch = params.n_ubatch;
|
||||
}
|
||||
|
||||
if (params.n_parallel < 0) {
|
||||
LOG_INF("%s: n_parallel is set to auto, using n_parallel = 4 and kv_unified = true\n", __func__);
|
||||
|
||||
params.n_parallel = 4;
|
||||
params.kv_unified = true;
|
||||
}
|
||||
|
||||
// for consistency between server router mode and single-model mode, we set the same model name as alias
|
||||
if (params.model_alias.empty() && !params.model.name.empty()) {
|
||||
params.model_alias = params.model.name;
|
||||
}
|
||||
|
||||
common_init();
|
||||
|
||||
// struct that contains llama context and inference
|
||||
server_context ctx_server;
|
||||
|
||||
llama_backend_init();
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
LOG_INF("system info: n_threads = %d, n_threads_batch = %d, total_threads = %d\n", params.cpuparams.n_threads, params.cpuparams_batch.n_threads, std::thread::hardware_concurrency());
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
LOG_INF("\n");
|
||||
|
||||
server_http_context ctx_http;
|
||||
if (!ctx_http.init(params)) {
|
||||
LOG_ERR("%s: failed to initialize HTTP server\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
//
|
||||
// Router
|
||||
//
|
||||
|
||||
// register API routes
|
||||
server_routes routes(params, ctx_server);
|
||||
|
||||
bool is_router_server = params.model.path.empty();
|
||||
std::optional<server_models_routes> models_routes{};
|
||||
if (is_router_server) {
|
||||
// setup server instances manager
|
||||
try {
|
||||
models_routes.emplace(params, argc, argv);
|
||||
} catch (const std::exception & e) {
|
||||
LOG_ERR("%s: failed to initialize router models: %s\n", __func__, e.what());
|
||||
return 1;
|
||||
}
|
||||
|
||||
// proxy handlers
|
||||
// note: routes.get_health stays the same
|
||||
routes.get_metrics = models_routes->proxy_get;
|
||||
routes.post_props = models_routes->proxy_post;
|
||||
routes.get_api_show = models_routes->proxy_get;
|
||||
routes.post_completions = models_routes->proxy_post;
|
||||
routes.post_completions_oai = models_routes->proxy_post;
|
||||
routes.post_chat_completions = models_routes->proxy_post;
|
||||
routes.post_anthropic_messages = models_routes->proxy_post;
|
||||
routes.post_anthropic_count_tokens = models_routes->proxy_post;
|
||||
routes.post_infill = models_routes->proxy_post;
|
||||
routes.post_embeddings = models_routes->proxy_post;
|
||||
routes.post_embeddings_oai = models_routes->proxy_post;
|
||||
routes.post_rerank = models_routes->proxy_post;
|
||||
routes.post_tokenize = models_routes->proxy_post;
|
||||
routes.post_detokenize = models_routes->proxy_post;
|
||||
routes.post_apply_template = models_routes->proxy_post;
|
||||
routes.get_lora_adapters = models_routes->proxy_get;
|
||||
routes.post_lora_adapters = models_routes->proxy_post;
|
||||
routes.get_slots = models_routes->proxy_get;
|
||||
routes.post_slots = models_routes->proxy_post;
|
||||
|
||||
// custom routes for router
|
||||
routes.get_props = models_routes->get_router_props;
|
||||
routes.get_models = models_routes->get_router_models;
|
||||
ctx_http.post("/models/load", ex_wrapper(models_routes->post_router_models_load));
|
||||
ctx_http.post("/models/unload", ex_wrapper(models_routes->post_router_models_unload));
|
||||
}
|
||||
|
||||
ctx_http.get ("/health", ex_wrapper(routes.get_health)); // public endpoint (no API key check)
|
||||
ctx_http.get ("/v1/health", ex_wrapper(routes.get_health)); // public endpoint (no API key check)
|
||||
ctx_http.get ("/metrics", ex_wrapper(routes.get_metrics));
|
||||
ctx_http.get ("/props", ex_wrapper(routes.get_props));
|
||||
ctx_http.post("/props", ex_wrapper(routes.post_props));
|
||||
ctx_http.post("/api/show", ex_wrapper(routes.get_api_show));
|
||||
ctx_http.get ("/models", ex_wrapper(routes.get_models)); // public endpoint (no API key check)
|
||||
ctx_http.get ("/v1/models", ex_wrapper(routes.get_models)); // public endpoint (no API key check)
|
||||
ctx_http.get ("/api/tags", ex_wrapper(routes.get_models)); // ollama specific endpoint. public endpoint (no API key check)
|
||||
ctx_http.post("/completion", ex_wrapper(routes.post_completions)); // legacy
|
||||
ctx_http.post("/completions", ex_wrapper(routes.post_completions));
|
||||
ctx_http.post("/v1/completions", ex_wrapper(routes.post_completions_oai));
|
||||
ctx_http.post("/chat/completions", ex_wrapper(routes.post_chat_completions));
|
||||
ctx_http.post("/v1/chat/completions", ex_wrapper(routes.post_chat_completions));
|
||||
ctx_http.post("/api/chat", ex_wrapper(routes.post_chat_completions)); // ollama specific endpoint
|
||||
ctx_http.post("/v1/messages", ex_wrapper(routes.post_anthropic_messages)); // anthropic messages API
|
||||
ctx_http.post("/v1/messages/count_tokens", ex_wrapper(routes.post_anthropic_count_tokens)); // anthropic token counting
|
||||
ctx_http.post("/infill", ex_wrapper(routes.post_infill));
|
||||
ctx_http.post("/embedding", ex_wrapper(routes.post_embeddings)); // legacy
|
||||
ctx_http.post("/embeddings", ex_wrapper(routes.post_embeddings));
|
||||
ctx_http.post("/v1/embeddings", ex_wrapper(routes.post_embeddings_oai));
|
||||
ctx_http.post("/rerank", ex_wrapper(routes.post_rerank));
|
||||
ctx_http.post("/reranking", ex_wrapper(routes.post_rerank));
|
||||
ctx_http.post("/v1/rerank", ex_wrapper(routes.post_rerank));
|
||||
ctx_http.post("/v1/reranking", ex_wrapper(routes.post_rerank));
|
||||
ctx_http.post("/tokenize", ex_wrapper(routes.post_tokenize));
|
||||
ctx_http.post("/detokenize", ex_wrapper(routes.post_detokenize));
|
||||
ctx_http.post("/apply-template", ex_wrapper(routes.post_apply_template));
|
||||
// LoRA adapters hotswap
|
||||
ctx_http.get ("/lora-adapters", ex_wrapper(routes.get_lora_adapters));
|
||||
ctx_http.post("/lora-adapters", ex_wrapper(routes.post_lora_adapters));
|
||||
// Save & load slots
|
||||
ctx_http.get ("/slots", ex_wrapper(routes.get_slots));
|
||||
ctx_http.post("/slots/:id_slot", ex_wrapper(routes.post_slots));
|
||||
|
||||
//
|
||||
// Start the server
|
||||
//
|
||||
|
||||
std::function<void()> clean_up;
|
||||
|
||||
if (is_router_server) {
|
||||
LOG_INF("%s: starting router server, no model will be loaded in this process\n", __func__);
|
||||
|
||||
clean_up = [&models_routes]() {
|
||||
SRV_INF("%s: cleaning up before exit...\n", __func__);
|
||||
if (models_routes.has_value()) {
|
||||
models_routes->models.unload_all();
|
||||
}
|
||||
llama_backend_free();
|
||||
};
|
||||
|
||||
if (!ctx_http.start()) {
|
||||
clean_up();
|
||||
LOG_ERR("%s: exiting due to HTTP server error\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
ctx_http.is_ready.store(true);
|
||||
|
||||
shutdown_handler = [&](int) {
|
||||
ctx_http.stop();
|
||||
};
|
||||
|
||||
} else {
|
||||
// setup clean up function, to be called before exit
|
||||
clean_up = [&ctx_http, &ctx_server]() {
|
||||
SRV_INF("%s: cleaning up before exit...\n", __func__);
|
||||
ctx_http.stop();
|
||||
ctx_server.terminate();
|
||||
llama_backend_free();
|
||||
};
|
||||
|
||||
// start the HTTP server before loading the model to be able to serve /health requests
|
||||
if (!ctx_http.start()) {
|
||||
clean_up();
|
||||
LOG_ERR("%s: exiting due to HTTP server error\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// load the model
|
||||
LOG_INF("%s: loading model\n", __func__);
|
||||
|
||||
if (!ctx_server.load_model(params)) {
|
||||
clean_up();
|
||||
if (ctx_http.thread.joinable()) {
|
||||
ctx_http.thread.join();
|
||||
}
|
||||
LOG_ERR("%s: exiting due to model loading error\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
routes.update_meta(ctx_server);
|
||||
ctx_http.is_ready.store(true);
|
||||
|
||||
LOG_INF("%s: model loaded\n", __func__);
|
||||
|
||||
shutdown_handler = [&](int) {
|
||||
// this will unblock start_loop()
|
||||
ctx_server.terminate();
|
||||
};
|
||||
}
|
||||
|
||||
// TODO: refactor in common/console
|
||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
||||
struct sigaction sigint_action;
|
||||
sigint_action.sa_handler = signal_handler;
|
||||
sigemptyset (&sigint_action.sa_mask);
|
||||
sigint_action.sa_flags = 0;
|
||||
sigaction(SIGINT, &sigint_action, NULL);
|
||||
sigaction(SIGTERM, &sigint_action, NULL);
|
||||
#elif defined (_WIN32)
|
||||
auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
|
||||
return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
|
||||
};
|
||||
SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
|
||||
#endif
|
||||
|
||||
if (is_router_server) {
|
||||
LOG_INF("%s: router server is listening on %s\n", __func__, ctx_http.listening_address.c_str());
|
||||
LOG_INF("%s: NOTE: router mode is experimental\n", __func__);
|
||||
LOG_INF("%s: it is not recommended to use this mode in untrusted environments\n", __func__);
|
||||
if (ctx_http.thread.joinable()) {
|
||||
ctx_http.thread.join(); // keep the main thread alive
|
||||
}
|
||||
|
||||
// when the HTTP server stops, clean up and exit
|
||||
clean_up();
|
||||
} else {
|
||||
LOG_INF("%s: server is listening on %s\n", __func__, ctx_http.listening_address.c_str());
|
||||
LOG_INF("%s: starting the main loop...\n", __func__);
|
||||
|
||||
// optionally, notify router server that this instance is ready
|
||||
const char * router_port = std::getenv("LLAMA_SERVER_ROUTER_PORT");
|
||||
std::thread monitor_thread;
|
||||
if (router_port != nullptr) {
|
||||
monitor_thread = server_models::setup_child_server(shutdown_handler);
|
||||
}
|
||||
|
||||
// this call blocks the main thread until queue_tasks.terminate() is called
|
||||
ctx_server.start_loop();
|
||||
|
||||
clean_up();
|
||||
if (ctx_http.thread.joinable()) {
|
||||
ctx_http.thread.join();
|
||||
}
|
||||
if (monitor_thread.joinable()) {
|
||||
monitor_thread.join();
|
||||
}
|
||||
|
||||
auto * ll_ctx = ctx_server.get_llama_context();
|
||||
if (ll_ctx != nullptr) {
|
||||
llama_memory_breakdown_print(ll_ctx);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
2
tools/server/tests/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
.venv
|
||||
tmp
|
||||
96
tools/server/tests/README.md
Normal file
@@ -0,0 +1,96 @@
|
||||
# Server tests
|
||||
|
||||
Python based server tests scenario using [pytest](https://docs.pytest.org/en/stable/).
|
||||
|
||||
Tests target GitHub workflows job runners with 4 vCPU.
|
||||
|
||||
Note: If the host architecture inference speed is faster than GitHub runners one, parallel scenario may randomly fail.
|
||||
To mitigate it, you can increase values in `n_predict`, `kv_size`.
|
||||
|
||||
### Install dependencies
|
||||
|
||||
`pip install -r requirements.txt`
|
||||
|
||||
### Run tests
|
||||
|
||||
1. Build the server
|
||||
|
||||
```shell
|
||||
cd ../../..
|
||||
cmake -B build
|
||||
cmake --build build --target llama-server
|
||||
```
|
||||
|
||||
2. Start the test: `./tests.sh`
|
||||
|
||||
It's possible to override some scenario steps values with environment variables:
|
||||
|
||||
| variable | description |
|
||||
|--------------------------|------------------------------------------------------------------------------------------------|
|
||||
| `PORT` | `context.server_port` to set the listening port of the server during scenario, default: `8080` |
|
||||
| `LLAMA_SERVER_BIN_PATH` | to change the server binary path, default: `../../../build/bin/llama-server` |
|
||||
| `DEBUG` | to enable steps and server verbose mode `--verbose` |
|
||||
| `N_GPU_LAYERS` | number of model layers to offload to VRAM `-ngl --n-gpu-layers` |
|
||||
| `LLAMA_CACHE` | by default server tests re-download models to the `tmp` subfolder. Set this to your cache (e.g. `$HOME/Library/Caches/llama.cpp` on Mac or `$HOME/.cache/llama.cpp` on Unix) to avoid this |
|
||||
|
||||
To run slow tests (will download many models, make sure to set `LLAMA_CACHE` if needed):
|
||||
|
||||
```shell
|
||||
SLOW_TESTS=1 ./tests.sh
|
||||
```
|
||||
|
||||
To run with stdout/stderr display in real time (verbose output, but useful for debugging):
|
||||
|
||||
```shell
|
||||
DEBUG=1 ./tests.sh -s -v -x
|
||||
```
|
||||
|
||||
To run all the tests in a file:
|
||||
|
||||
```shell
|
||||
./tests.sh unit/test_chat_completion.py -v -x
|
||||
```
|
||||
|
||||
To run a single test:
|
||||
|
||||
```shell
|
||||
./tests.sh unit/test_chat_completion.py::test_invalid_chat_completion_req
|
||||
```
|
||||
|
||||
Hint: You can compile and run test in single command, useful for local developement:
|
||||
|
||||
```shell
|
||||
cmake --build build -j --target llama-server && ./tools/server/tests/tests.sh
|
||||
```
|
||||
|
||||
To see all available arguments, please refer to [pytest documentation](https://docs.pytest.org/en/stable/how-to/usage.html)
|
||||
|
||||
### Debugging external llama-server
|
||||
It can sometimes be useful to run the server in a debugger when invesigating test
|
||||
failures. To do this, the environment variable `DEBUG_EXTERNAL=1` can be set
|
||||
which will cause the test to skip starting a llama-server itself. Instead, the
|
||||
server can be started in a debugger.
|
||||
|
||||
Example using `gdb`:
|
||||
```console
|
||||
$ gdb --args ../../../build/bin/llama-server \
|
||||
--host 127.0.0.1 --port 8080 \
|
||||
--temp 0.8 --seed 42 \
|
||||
--hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf \
|
||||
--batch-size 32 --no-slots --alias tinyllama-2 --ctx-size 512 \
|
||||
--parallel 2 --n-predict 64
|
||||
```
|
||||
And a break point can be set in before running:
|
||||
```console
|
||||
(gdb) br server.cpp:4604
|
||||
(gdb) r
|
||||
main: server is listening on http://127.0.0.1:8080 - starting the main loop
|
||||
srv update_slots: all slots are idle
|
||||
```
|
||||
|
||||
And then the test in question can be run in another terminal:
|
||||
```console
|
||||
(venv) $ env DEBUG_EXTERNAL=1 ./tests.sh unit/test_chat_completion.py -v -x
|
||||
```
|
||||
And this should trigger the breakpoint and allow inspection of the server state
|
||||
in the debugger terminal.
|
||||
21
tools/server/tests/conftest.py
Normal file
@@ -0,0 +1,21 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
|
||||
|
||||
# ref: https://stackoverflow.com/questions/22627659/run-code-before-and-after-each-test-in-py-test
|
||||
@pytest.fixture(autouse=True)
|
||||
def stop_server_after_each_test():
|
||||
# do nothing before each test
|
||||
yield
|
||||
# stop all servers after each test
|
||||
instances = set(
|
||||
server_instances
|
||||
) # copy the set to prevent 'Set changed size during iteration'
|
||||
for server in instances:
|
||||
server.stop()
|
||||
|
||||
|
||||
@pytest.fixture(scope="module", autouse=True)
|
||||
def do_something():
|
||||
# this will be run once per test session, before any tests
|
||||
ServerPreset.load_all()
|
||||
4
tools/server/tests/pytest.ini
Normal file
@@ -0,0 +1,4 @@
|
||||
[pytest]
|
||||
markers =
|
||||
slow: marks tests as slow (deselect with '-m "not slow"')
|
||||
serial
|
||||
8
tools/server/tests/requirements.txt
Normal file
@@ -0,0 +1,8 @@
|
||||
aiohttp~=3.9.3
|
||||
pytest~=8.3.3
|
||||
huggingface_hub>=0.34.0,<1.0
|
||||
numpy~=1.26.4
|
||||
openai~=1.55.3
|
||||
prometheus-client~=0.20.0
|
||||
requests~=2.32.3
|
||||
wget~=3.2
|
||||
23
tools/server/tests/tests.sh
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# make sure we are in the right directory
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
cd $SCRIPT_DIR
|
||||
|
||||
set -eu
|
||||
|
||||
if [[ "${SLOW_TESTS:-0}" == 1 ]]; then
|
||||
# Slow tests for tool calls need quite a few models ahead of time to avoid timing out.
|
||||
python $SCRIPT_DIR/../../../scripts/fetch_server_test_models.py
|
||||
fi
|
||||
|
||||
if [ $# -lt 1 ]
|
||||
then
|
||||
if [[ "${SLOW_TESTS:-0}" == 1 ]]; then
|
||||
pytest -v -x
|
||||
else
|
||||
pytest -v -x -m "not slow"
|
||||
fi
|
||||
else
|
||||
pytest "$@"
|
||||
fi
|
||||
96
tools/server/tests/unit/test_basic.py
Normal file
@@ -0,0 +1,96 @@
|
||||
import pytest
|
||||
import requests
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
|
||||
def test_server_start_simple():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("GET", "/health")
|
||||
assert res.status_code == 200
|
||||
|
||||
|
||||
def test_server_props():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("GET", "/props")
|
||||
assert res.status_code == 200
|
||||
assert ".gguf" in res.body["model_path"]
|
||||
assert res.body["total_slots"] == server.n_slots
|
||||
default_val = res.body["default_generation_settings"]
|
||||
assert server.n_ctx is not None and server.n_slots is not None
|
||||
assert default_val["n_ctx"] == server.n_ctx / server.n_slots
|
||||
assert default_val["params"]["seed"] == server.seed
|
||||
|
||||
|
||||
def test_server_models():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("GET", "/models")
|
||||
assert res.status_code == 200
|
||||
assert len(res.body["data"]) == 1
|
||||
assert res.body["data"][0]["id"] == server.model_alias
|
||||
|
||||
|
||||
def test_server_slots():
|
||||
global server
|
||||
|
||||
# without slots endpoint enabled, this should return error
|
||||
server.server_slots = False
|
||||
server.start()
|
||||
res = server.make_request("GET", "/slots")
|
||||
assert res.status_code == 501 # ERROR_TYPE_NOT_SUPPORTED
|
||||
assert "error" in res.body
|
||||
server.stop()
|
||||
|
||||
# with slots endpoint enabled, this should return slots info
|
||||
server.server_slots = True
|
||||
server.n_slots = 2
|
||||
server.start()
|
||||
res = server.make_request("GET", "/slots")
|
||||
assert res.status_code == 200
|
||||
assert len(res.body) == server.n_slots
|
||||
assert server.n_ctx is not None and server.n_slots is not None
|
||||
assert res.body[0]["n_ctx"] == server.n_ctx / server.n_slots
|
||||
assert "params" not in res.body[0]
|
||||
|
||||
|
||||
def test_load_split_model():
|
||||
global server
|
||||
server.offline = False
|
||||
server.model_hf_repo = "ggml-org/models"
|
||||
server.model_hf_file = "tinyllamas/split/stories15M-q8_0-00001-of-00003.gguf"
|
||||
server.model_alias = "tinyllama-split"
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"n_predict": 16,
|
||||
"prompt": "Hello",
|
||||
"temperature": 0.0,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(little|girl)+", res.body["content"])
|
||||
|
||||
|
||||
def test_no_webui():
|
||||
global server
|
||||
# default: webui enabled
|
||||
server.start()
|
||||
url = f"http://{server.server_host}:{server.server_port}"
|
||||
res = requests.get(url)
|
||||
assert res.status_code == 200
|
||||
assert "<!doctype html>" in res.text
|
||||
server.stop()
|
||||
|
||||
# with --no-webui
|
||||
server.no_webui = True
|
||||
server.start()
|
||||
res = requests.get(url)
|
||||
assert res.status_code == 404
|
||||
512
tools/server/tests/unit/test_chat_completion.py
Normal file
@@ -0,0 +1,512 @@
|
||||
import pytest
|
||||
from openai import OpenAI
|
||||
from utils import *
|
||||
|
||||
server: ServerProcess
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"model,system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,finish_reason,jinja,chat_template",
|
||||
[
|
||||
(None, "Book", "Hey", 8, "But she couldn't", 69, 8, "length", False, None),
|
||||
(None, "Book", "Hey", 8, "But she couldn't", 69, 8, "length", True, None),
|
||||
(None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", False, None),
|
||||
(None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", True, None),
|
||||
(None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", True, 'chatml'),
|
||||
(None, "Book", "What is the best book", 8, "^ blue", 23, 8, "length", True, "This is not a chat template, it is"),
|
||||
("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 128, "length", False, None),
|
||||
("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 128, "length", True, None),
|
||||
(None, "Book", [{"type": "text", "text": "What is"}, {"type": "text", "text": "the best book"}], 8, "Whillicter", 79, 8, "length", False, None),
|
||||
(None, "Book", [{"type": "text", "text": "What is"}, {"type": "text", "text": "the best book"}], 8, "Whillicter", 79, 8, "length", True, None),
|
||||
]
|
||||
)
|
||||
def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason, jinja, chat_template):
|
||||
global server
|
||||
server.jinja = jinja
|
||||
server.chat_template = chat_template
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"model": model,
|
||||
"max_tokens": max_tokens,
|
||||
"messages": [
|
||||
{"role": "system", "content": system_prompt},
|
||||
{"role": "user", "content": user_prompt},
|
||||
],
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert "cmpl" in res.body["id"] # make sure the completion id has the expected format
|
||||
assert res.body["system_fingerprint"].startswith("b")
|
||||
# we no longer reflect back the model name, see https://github.com/ggml-org/llama.cpp/pull/17668
|
||||
# assert res.body["model"] == model if model is not None else server.model_alias
|
||||
assert res.body["usage"]["prompt_tokens"] == n_prompt
|
||||
assert res.body["usage"]["completion_tokens"] == n_predicted
|
||||
choice = res.body["choices"][0]
|
||||
assert "assistant" == choice["message"]["role"]
|
||||
assert match_regex(re_content, choice["message"]["content"]), f'Expected {re_content}, got {choice["message"]["content"]}'
|
||||
assert choice["finish_reason"] == finish_reason
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,finish_reason",
|
||||
[
|
||||
("Book", "What is the best book", 8, "(Suddenly)+", 77, 8, "length"),
|
||||
("You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 128, "length"),
|
||||
]
|
||||
)
|
||||
def test_chat_completion_stream(system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason):
|
||||
global server
|
||||
server.model_alias = "llama-test-model"
|
||||
server.start()
|
||||
res = server.make_stream_request("POST", "/chat/completions", data={
|
||||
"max_tokens": max_tokens,
|
||||
"messages": [
|
||||
{"role": "system", "content": system_prompt},
|
||||
{"role": "user", "content": user_prompt},
|
||||
],
|
||||
"stream": True,
|
||||
})
|
||||
content = ""
|
||||
last_cmpl_id = None
|
||||
for i, data in enumerate(res):
|
||||
if data["choices"]:
|
||||
choice = data["choices"][0]
|
||||
if i == 0:
|
||||
# Check first role message for stream=True
|
||||
assert choice["delta"]["content"] is None
|
||||
assert choice["delta"]["role"] == "assistant"
|
||||
else:
|
||||
assert "role" not in choice["delta"]
|
||||
assert data["system_fingerprint"].startswith("b")
|
||||
assert data["model"] == "llama-test-model"
|
||||
if last_cmpl_id is None:
|
||||
last_cmpl_id = data["id"]
|
||||
assert last_cmpl_id == data["id"] # make sure the completion id is the same for all events in the stream
|
||||
if choice["finish_reason"] in ["stop", "length"]:
|
||||
assert "content" not in choice["delta"]
|
||||
assert match_regex(re_content, content)
|
||||
assert choice["finish_reason"] == finish_reason
|
||||
else:
|
||||
assert choice["finish_reason"] is None
|
||||
content += choice["delta"]["content"] or ''
|
||||
else:
|
||||
assert data["usage"]["prompt_tokens"] == n_prompt
|
||||
assert data["usage"]["completion_tokens"] == n_predicted
|
||||
|
||||
|
||||
def test_chat_completion_with_openai_library():
|
||||
global server
|
||||
server.start()
|
||||
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
|
||||
res = client.chat.completions.create(
|
||||
model="gpt-3.5-turbo-instruct",
|
||||
messages=[
|
||||
{"role": "system", "content": "Book"},
|
||||
{"role": "user", "content": "What is the best book"},
|
||||
],
|
||||
max_tokens=8,
|
||||
seed=42,
|
||||
temperature=0.8,
|
||||
)
|
||||
assert res.system_fingerprint is not None and res.system_fingerprint.startswith("b")
|
||||
assert res.choices[0].finish_reason == "length"
|
||||
assert res.choices[0].message.content is not None
|
||||
assert match_regex("(Suddenly)+", res.choices[0].message.content)
|
||||
|
||||
|
||||
def test_chat_template():
|
||||
global server
|
||||
server.chat_template = "llama3"
|
||||
server.debug = True # to get the "__verbose" object in the response
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"max_tokens": 8,
|
||||
"messages": [
|
||||
{"role": "system", "content": "Book"},
|
||||
{"role": "user", "content": "What is the best book"},
|
||||
]
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert "__verbose" in res.body
|
||||
assert res.body["__verbose"]["prompt"] == "<s> <|start_header_id|>system<|end_header_id|>\n\nBook<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the best book<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
|
||||
|
||||
|
||||
@pytest.mark.parametrize("prefill,re_prefill", [
|
||||
("Whill", "Whill"),
|
||||
([{"type": "text", "text": "Wh"}, {"type": "text", "text": "ill"}], "Whill"),
|
||||
])
|
||||
def test_chat_template_assistant_prefill(prefill, re_prefill):
|
||||
global server
|
||||
server.chat_template = "llama3"
|
||||
server.debug = True # to get the "__verbose" object in the response
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"max_tokens": 8,
|
||||
"messages": [
|
||||
{"role": "system", "content": "Book"},
|
||||
{"role": "user", "content": "What is the best book"},
|
||||
{"role": "assistant", "content": prefill},
|
||||
]
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert "__verbose" in res.body
|
||||
assert res.body["__verbose"]["prompt"] == f"<s> <|start_header_id|>system<|end_header_id|>\n\nBook<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the best book<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n{re_prefill}"
|
||||
|
||||
|
||||
def test_apply_chat_template():
|
||||
global server
|
||||
server.chat_template = "command-r"
|
||||
server.start()
|
||||
res = server.make_request("POST", "/apply-template", data={
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a test."},
|
||||
{"role": "user", "content":"Hi there"},
|
||||
]
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert "prompt" in res.body
|
||||
assert res.body["prompt"] == "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>You are a test.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hi there<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
|
||||
|
||||
|
||||
@pytest.mark.parametrize("response_format,n_predicted,re_content", [
|
||||
({"type": "json_object", "schema": {"const": "42"}}, 6, "\"42\""),
|
||||
({"type": "json_object", "schema": {"items": [{"type": "integer"}]}}, 10, "[ -3000 ]"),
|
||||
({"type": "json_schema", "json_schema": {"schema": {"const": "foooooo"}}}, 10, "\"foooooo\""),
|
||||
({"type": "json_object"}, 10, "(\\{|John)+"),
|
||||
({"type": "sound"}, 0, None),
|
||||
# invalid response format (expected to fail)
|
||||
({"type": "json_object", "schema": 123}, 0, None),
|
||||
({"type": "json_object", "schema": {"type": 123}}, 0, None),
|
||||
({"type": "json_object", "schema": {"type": "hiccup"}}, 0, None),
|
||||
])
|
||||
def test_completion_with_response_format(response_format: dict, n_predicted: int, re_content: str | None):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"max_tokens": n_predicted,
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a coding assistant."},
|
||||
{"role": "user", "content": "Write an example"},
|
||||
],
|
||||
"response_format": response_format,
|
||||
})
|
||||
if re_content is not None:
|
||||
assert res.status_code == 200
|
||||
choice = res.body["choices"][0]
|
||||
assert match_regex(re_content, choice["message"]["content"])
|
||||
else:
|
||||
assert res.status_code == 400
|
||||
assert "error" in res.body
|
||||
|
||||
|
||||
@pytest.mark.parametrize("jinja,json_schema,n_predicted,re_content", [
|
||||
(False, {"const": "42"}, 6, "\"42\""),
|
||||
(True, {"const": "42"}, 6, "\"42\""),
|
||||
])
|
||||
def test_completion_with_json_schema(jinja: bool, json_schema: dict, n_predicted: int, re_content: str):
|
||||
global server
|
||||
server.jinja = jinja
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"max_tokens": n_predicted,
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a coding assistant."},
|
||||
{"role": "user", "content": "Write an example"},
|
||||
],
|
||||
"json_schema": json_schema,
|
||||
})
|
||||
assert res.status_code == 200, f'Expected 200, got {res.status_code}'
|
||||
choice = res.body["choices"][0]
|
||||
assert match_regex(re_content, choice["message"]["content"]), f'Expected {re_content}, got {choice["message"]["content"]}'
|
||||
|
||||
|
||||
@pytest.mark.parametrize("jinja,grammar,n_predicted,re_content", [
|
||||
(False, 'root ::= "a"{5,5}', 6, "a{5,5}"),
|
||||
(True, 'root ::= "a"{5,5}', 6, "a{5,5}"),
|
||||
])
|
||||
def test_completion_with_grammar(jinja: bool, grammar: str, n_predicted: int, re_content: str):
|
||||
global server
|
||||
server.jinja = jinja
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"max_tokens": n_predicted,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Does not matter what I say, does it?"},
|
||||
],
|
||||
"grammar": grammar,
|
||||
})
|
||||
assert res.status_code == 200, res.body
|
||||
choice = res.body["choices"][0]
|
||||
assert match_regex(re_content, choice["message"]["content"]), choice["message"]["content"]
|
||||
|
||||
|
||||
@pytest.mark.parametrize("messages", [
|
||||
None,
|
||||
"string",
|
||||
[123],
|
||||
[{}],
|
||||
[{"role": 123}],
|
||||
[{"role": "system", "content": 123}],
|
||||
# [{"content": "hello"}], # TODO: should not be a valid case
|
||||
[{"role": "system", "content": "test"}, {}],
|
||||
[{"role": "user", "content": "test"}, {"role": "assistant", "content": "test"}, {"role": "assistant", "content": "test"}],
|
||||
])
|
||||
def test_invalid_chat_completion_req(messages):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"messages": messages,
|
||||
})
|
||||
assert res.status_code == 400 or res.status_code == 500
|
||||
assert "error" in res.body
|
||||
|
||||
|
||||
def test_chat_completion_with_timings_per_token():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_stream_request("POST", "/chat/completions", data={
|
||||
"max_tokens": 10,
|
||||
"messages": [{"role": "user", "content": "test"}],
|
||||
"stream": True,
|
||||
"stream_options": {"include_usage": True},
|
||||
"timings_per_token": True,
|
||||
})
|
||||
stats_received = False
|
||||
for i, data in enumerate(res):
|
||||
if i == 0:
|
||||
# Check first role message for stream=True
|
||||
assert data["choices"][0]["delta"]["content"] is None
|
||||
assert data["choices"][0]["delta"]["role"] == "assistant"
|
||||
assert "timings" not in data, f'First event should not have timings: {data}'
|
||||
else:
|
||||
if data["choices"]:
|
||||
assert "role" not in data["choices"][0]["delta"]
|
||||
else:
|
||||
assert "timings" in data
|
||||
assert "prompt_per_second" in data["timings"]
|
||||
assert "predicted_per_second" in data["timings"]
|
||||
assert "predicted_n" in data["timings"]
|
||||
assert data["timings"]["predicted_n"] <= 10
|
||||
stats_received = True
|
||||
assert stats_received
|
||||
|
||||
|
||||
def test_logprobs():
|
||||
global server
|
||||
server.start()
|
||||
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
|
||||
res = client.chat.completions.create(
|
||||
model="gpt-3.5-turbo-instruct",
|
||||
temperature=0.0,
|
||||
messages=[
|
||||
{"role": "system", "content": "Book"},
|
||||
{"role": "user", "content": "What is the best book"},
|
||||
],
|
||||
max_tokens=5,
|
||||
logprobs=True,
|
||||
top_logprobs=10,
|
||||
)
|
||||
output_text = res.choices[0].message.content
|
||||
aggregated_text = ''
|
||||
assert res.choices[0].logprobs is not None
|
||||
assert res.choices[0].logprobs.content is not None
|
||||
for token in res.choices[0].logprobs.content:
|
||||
aggregated_text += token.token
|
||||
assert token.logprob <= 0.0
|
||||
assert token.bytes is not None
|
||||
assert len(token.top_logprobs) > 0
|
||||
assert aggregated_text == output_text
|
||||
|
||||
|
||||
def test_logprobs_stream():
|
||||
global server
|
||||
server.start()
|
||||
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
|
||||
res = client.chat.completions.create(
|
||||
model="gpt-3.5-turbo-instruct",
|
||||
temperature=0.0,
|
||||
messages=[
|
||||
{"role": "system", "content": "Book"},
|
||||
{"role": "user", "content": "What is the best book"},
|
||||
],
|
||||
max_tokens=5,
|
||||
logprobs=True,
|
||||
top_logprobs=10,
|
||||
stream=True,
|
||||
)
|
||||
output_text = ''
|
||||
aggregated_text = ''
|
||||
for i, data in enumerate(res):
|
||||
if data.choices:
|
||||
choice = data.choices[0]
|
||||
if i == 0:
|
||||
# Check first role message for stream=True
|
||||
assert choice.delta.content is None
|
||||
assert choice.delta.role == "assistant"
|
||||
else:
|
||||
assert choice.delta.role is None
|
||||
if choice.finish_reason is None:
|
||||
if choice.delta.content:
|
||||
output_text += choice.delta.content
|
||||
assert choice.logprobs is not None
|
||||
assert choice.logprobs.content is not None
|
||||
for token in choice.logprobs.content:
|
||||
aggregated_text += token.token
|
||||
assert token.logprob <= 0.0
|
||||
assert token.bytes is not None
|
||||
assert token.top_logprobs is not None
|
||||
assert len(token.top_logprobs) > 0
|
||||
assert aggregated_text == output_text
|
||||
|
||||
|
||||
def test_logit_bias():
|
||||
global server
|
||||
server.start()
|
||||
|
||||
exclude = ["i", "I", "the", "The", "to", "a", "an", "be", "is", "was", "but", "But", "and", "And", "so", "So", "you", "You", "he", "He", "she", "She", "we", "We", "they", "They", "it", "It", "his", "His", "her", "Her", "book", "Book"]
|
||||
|
||||
res = server.make_request("POST", "/tokenize", data={
|
||||
"content": " " + " ".join(exclude) + " ",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
tokens = res.body["tokens"]
|
||||
logit_bias = {tok: -100 for tok in tokens}
|
||||
|
||||
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
|
||||
res = client.chat.completions.create(
|
||||
model="gpt-3.5-turbo-instruct",
|
||||
temperature=0.0,
|
||||
messages=[
|
||||
{"role": "system", "content": "Book"},
|
||||
{"role": "user", "content": "What is the best book"},
|
||||
],
|
||||
max_tokens=64,
|
||||
logit_bias=logit_bias
|
||||
)
|
||||
output_text = res.choices[0].message.content
|
||||
assert output_text
|
||||
assert all(output_text.find(" " + tok + " ") == -1 for tok in exclude)
|
||||
|
||||
def test_context_size_exceeded():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"messages": [
|
||||
{"role": "system", "content": "Book"},
|
||||
{"role": "user", "content": "What is the best book"},
|
||||
] * 100, # make the prompt too long
|
||||
})
|
||||
assert res.status_code == 400
|
||||
assert "error" in res.body
|
||||
assert res.body["error"]["type"] == "exceed_context_size_error"
|
||||
assert res.body["error"]["n_prompt_tokens"] > 0
|
||||
assert server.n_ctx is not None
|
||||
assert server.n_slots is not None
|
||||
assert res.body["error"]["n_ctx"] == server.n_ctx // server.n_slots
|
||||
|
||||
|
||||
def test_context_size_exceeded_stream():
|
||||
global server
|
||||
server.start()
|
||||
try:
|
||||
for _ in server.make_stream_request("POST", "/chat/completions", data={
|
||||
"messages": [
|
||||
{"role": "system", "content": "Book"},
|
||||
{"role": "user", "content": "What is the best book"},
|
||||
] * 100, # make the prompt too long
|
||||
"stream": True}):
|
||||
pass
|
||||
assert False, "Should have failed"
|
||||
except ServerError as e:
|
||||
assert e.code == 400
|
||||
assert "error" in e.body
|
||||
assert e.body["error"]["type"] == "exceed_context_size_error"
|
||||
assert e.body["error"]["n_prompt_tokens"] > 0
|
||||
assert server.n_ctx is not None
|
||||
assert server.n_slots is not None
|
||||
assert e.body["error"]["n_ctx"] == server.n_ctx // server.n_slots
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"n_batch,batch_count,reuse_cache",
|
||||
[
|
||||
(64, 4, False),
|
||||
(64, 2, True),
|
||||
]
|
||||
)
|
||||
def test_return_progress(n_batch, batch_count, reuse_cache):
|
||||
global server
|
||||
server.n_batch = n_batch
|
||||
server.n_ctx = 256
|
||||
server.n_slots = 1
|
||||
server.start()
|
||||
def make_cmpl_request():
|
||||
return server.make_stream_request("POST", "/chat/completions", data={
|
||||
"max_tokens": 10,
|
||||
"messages": [
|
||||
{"role": "user", "content": "This is a test" * 10},
|
||||
],
|
||||
"stream": True,
|
||||
"return_progress": True,
|
||||
})
|
||||
if reuse_cache:
|
||||
# make a first request to populate the cache
|
||||
res0 = make_cmpl_request()
|
||||
for _ in res0:
|
||||
pass # discard the output
|
||||
|
||||
res = make_cmpl_request()
|
||||
last_progress = None
|
||||
total_batch_count = 0
|
||||
|
||||
for data in res:
|
||||
cur_progress = data.get("prompt_progress", None)
|
||||
if cur_progress is None:
|
||||
continue
|
||||
if total_batch_count == 0:
|
||||
# first progress report must have n_cache == n_processed
|
||||
assert cur_progress["total"] > 0
|
||||
assert cur_progress["cache"] == cur_progress["processed"]
|
||||
if reuse_cache:
|
||||
# when reusing cache, we expect some cached tokens
|
||||
assert cur_progress["cache"] > 0
|
||||
if last_progress is not None:
|
||||
assert cur_progress["total"] == last_progress["total"]
|
||||
assert cur_progress["cache"] == last_progress["cache"]
|
||||
assert cur_progress["processed"] > last_progress["processed"]
|
||||
total_batch_count += 1
|
||||
last_progress = cur_progress
|
||||
|
||||
# last progress should indicate completion (all tokens processed)
|
||||
assert last_progress is not None
|
||||
assert last_progress["total"] > 0
|
||||
assert last_progress["processed"] == last_progress["total"]
|
||||
assert total_batch_count == batch_count
|
||||
|
||||
|
||||
def test_chat_completions_multiple_choices():
|
||||
global server
|
||||
server.start()
|
||||
# make sure cache can be reused across multiple choices and multiple requests
|
||||
# ref: https://github.com/ggml-org/llama.cpp/pull/18663
|
||||
for _ in range(2):
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"max_tokens": 8,
|
||||
"n": 2,
|
||||
"messages": [
|
||||
{"role": "system", "content": "Book"},
|
||||
{"role": "user", "content": "What is the best book"},
|
||||
],
|
||||
# test forcing the same slot to be used
|
||||
# the scheduler should not be locked up in this case
|
||||
"id_slot": 0,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert len(res.body["choices"]) == 2
|
||||
for choice in res.body["choices"]:
|
||||
assert "assistant" == choice["message"]["role"]
|
||||
assert choice["finish_reason"] == "length"
|
||||
896
tools/server/tests/unit/test_compat_anthropic.py
Normal file
@@ -0,0 +1,896 @@
|
||||
#!/usr/bin/env python3
|
||||
import pytest
|
||||
import base64
|
||||
import requests
|
||||
|
||||
from utils import *
|
||||
|
||||
server: ServerProcess
|
||||
|
||||
|
||||
def get_test_image_base64() -> str:
|
||||
"""Get a test image in base64 format"""
|
||||
# Use the same test image as test_vision_api.py
|
||||
IMG_URL = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/11_truck.png"
|
||||
response = requests.get(IMG_URL)
|
||||
response.raise_for_status()
|
||||
return base64.b64encode(response.content).decode("utf-8")
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
server.model_alias = "tinyllama-2-anthropic"
|
||||
server.server_port = 8082
|
||||
server.n_slots = 1
|
||||
server.n_ctx = 8192
|
||||
server.n_batch = 2048
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def vision_server():
|
||||
"""Separate fixture for vision tests that require multimodal support"""
|
||||
global server
|
||||
server = ServerPreset.tinygemma3()
|
||||
server.offline = False # Allow downloading the model
|
||||
server.model_alias = "tinygemma3-anthropic"
|
||||
server.server_port = 8083 # Different port to avoid conflicts
|
||||
server.n_slots = 1
|
||||
return server
|
||||
|
||||
|
||||
# Basic message tests
|
||||
|
||||
def test_anthropic_messages_basic():
|
||||
"""Test basic Anthropic messages endpoint"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Say hello"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200, f"Expected 200, got {res.status_code}"
|
||||
assert res.body["type"] == "message", f"Expected type 'message', got {res.body.get('type')}"
|
||||
assert res.body["role"] == "assistant", f"Expected role 'assistant', got {res.body.get('role')}"
|
||||
assert "content" in res.body, "Missing 'content' field"
|
||||
assert isinstance(res.body["content"], list), "Content should be an array"
|
||||
assert len(res.body["content"]) > 0, "Content array should not be empty"
|
||||
assert res.body["content"][0]["type"] == "text", "First content block should be text"
|
||||
assert "text" in res.body["content"][0], "Text content block missing 'text' field"
|
||||
assert res.body["stop_reason"] in ["end_turn", "max_tokens"], f"Invalid stop_reason: {res.body.get('stop_reason')}"
|
||||
assert "usage" in res.body, "Missing 'usage' field"
|
||||
assert "input_tokens" in res.body["usage"], "Missing usage.input_tokens"
|
||||
assert "output_tokens" in res.body["usage"], "Missing usage.output_tokens"
|
||||
assert isinstance(res.body["usage"]["input_tokens"], int), "input_tokens should be integer"
|
||||
assert isinstance(res.body["usage"]["output_tokens"], int), "output_tokens should be integer"
|
||||
assert res.body["usage"]["output_tokens"] > 0, "Should have generated some tokens"
|
||||
# Anthropic API should NOT include timings
|
||||
assert "timings" not in res.body, "Anthropic API should not include timings field"
|
||||
|
||||
|
||||
def test_anthropic_messages_with_system():
|
||||
"""Test messages with system prompt"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"system": "You are a helpful assistant.",
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
assert len(res.body["content"]) > 0
|
||||
|
||||
|
||||
def test_anthropic_messages_multipart_content():
|
||||
"""Test messages with multipart content blocks"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"messages": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": "What is"},
|
||||
{"type": "text", "text": " the answer?"}
|
||||
]
|
||||
}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
def test_anthropic_messages_conversation():
|
||||
"""Test multi-turn conversation"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"},
|
||||
{"role": "assistant", "content": "Hi there!"},
|
||||
{"role": "user", "content": "How are you?"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
# Streaming tests
|
||||
|
||||
def test_anthropic_messages_streaming():
|
||||
"""Test streaming messages"""
|
||||
server.start()
|
||||
|
||||
res = server.make_stream_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 30,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Say hello"}
|
||||
],
|
||||
"stream": True
|
||||
})
|
||||
|
||||
events = []
|
||||
for data in res:
|
||||
# Each event should have type and other fields
|
||||
assert "type" in data, f"Missing 'type' in event: {data}"
|
||||
events.append(data)
|
||||
|
||||
# Verify event sequence
|
||||
event_types = [e["type"] for e in events]
|
||||
assert "message_start" in event_types, "Missing message_start event"
|
||||
assert "content_block_start" in event_types, "Missing content_block_start event"
|
||||
assert "content_block_delta" in event_types, "Missing content_block_delta event"
|
||||
assert "content_block_stop" in event_types, "Missing content_block_stop event"
|
||||
assert "message_delta" in event_types, "Missing message_delta event"
|
||||
assert "message_stop" in event_types, "Missing message_stop event"
|
||||
|
||||
# Check message_start structure
|
||||
message_start = next(e for e in events if e["type"] == "message_start")
|
||||
assert "message" in message_start, "message_start missing 'message' field"
|
||||
assert message_start["message"]["type"] == "message"
|
||||
assert message_start["message"]["role"] == "assistant"
|
||||
assert message_start["message"]["content"] == []
|
||||
assert "usage" in message_start["message"]
|
||||
assert message_start["message"]["usage"]["input_tokens"] > 0
|
||||
|
||||
# Check content_block_start
|
||||
block_start = next(e for e in events if e["type"] == "content_block_start")
|
||||
assert "index" in block_start, "content_block_start missing 'index'"
|
||||
assert block_start["index"] == 0, "First content block should be at index 0"
|
||||
assert "content_block" in block_start
|
||||
assert block_start["content_block"]["type"] == "text"
|
||||
|
||||
# Check content_block_delta
|
||||
deltas = [e for e in events if e["type"] == "content_block_delta"]
|
||||
assert len(deltas) > 0, "Should have at least one content_block_delta"
|
||||
for delta in deltas:
|
||||
assert "index" in delta
|
||||
assert "delta" in delta
|
||||
assert delta["delta"]["type"] == "text_delta"
|
||||
assert "text" in delta["delta"]
|
||||
|
||||
# Check content_block_stop
|
||||
block_stop = next(e for e in events if e["type"] == "content_block_stop")
|
||||
assert "index" in block_stop
|
||||
assert block_stop["index"] == 0
|
||||
|
||||
# Check message_delta
|
||||
message_delta = next(e for e in events if e["type"] == "message_delta")
|
||||
assert "delta" in message_delta
|
||||
assert "stop_reason" in message_delta["delta"]
|
||||
assert message_delta["delta"]["stop_reason"] in ["end_turn", "max_tokens"]
|
||||
assert "usage" in message_delta
|
||||
assert message_delta["usage"]["output_tokens"] > 0
|
||||
|
||||
# Check message_stop
|
||||
message_stop = next(e for e in events if e["type"] == "message_stop")
|
||||
# message_stop should NOT have timings for Anthropic API
|
||||
assert "timings" not in message_stop, "Anthropic streaming should not include timings"
|
||||
|
||||
|
||||
# Token counting tests
|
||||
|
||||
def test_anthropic_count_tokens():
|
||||
"""Test token counting endpoint"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages/count_tokens", data={
|
||||
"model": "test",
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello world"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert "input_tokens" in res.body
|
||||
assert isinstance(res.body["input_tokens"], int)
|
||||
assert res.body["input_tokens"] > 0
|
||||
# Should only have input_tokens, no other fields
|
||||
assert "output_tokens" not in res.body
|
||||
|
||||
|
||||
def test_anthropic_count_tokens_with_system():
|
||||
"""Test token counting with system prompt"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages/count_tokens", data={
|
||||
"model": "test",
|
||||
"system": "You are a helpful assistant.",
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["input_tokens"] > 0
|
||||
|
||||
|
||||
def test_anthropic_count_tokens_no_max_tokens():
|
||||
"""Test that count_tokens doesn't require max_tokens"""
|
||||
server.start()
|
||||
|
||||
# max_tokens is NOT required for count_tokens
|
||||
res = server.make_request("POST", "/v1/messages/count_tokens", data={
|
||||
"model": "test",
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert "input_tokens" in res.body
|
||||
|
||||
|
||||
# Tool use tests
|
||||
|
||||
def test_anthropic_tool_use_basic():
|
||||
"""Test basic tool use"""
|
||||
server.jinja = True
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 200,
|
||||
"tools": [{
|
||||
"name": "get_weather",
|
||||
"description": "Get the current weather in a location",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "City name"
|
||||
}
|
||||
},
|
||||
"required": ["location"]
|
||||
}
|
||||
}],
|
||||
"messages": [
|
||||
{"role": "user", "content": "What's the weather in Paris?"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
assert len(res.body["content"]) > 0
|
||||
|
||||
# Check if model used the tool (it might not always, depending on the model)
|
||||
content_types = [block.get("type") for block in res.body["content"]]
|
||||
|
||||
if "tool_use" in content_types:
|
||||
# Model used the tool
|
||||
assert res.body["stop_reason"] == "tool_use"
|
||||
|
||||
# Find the tool_use block
|
||||
tool_block = next(b for b in res.body["content"] if b.get("type") == "tool_use")
|
||||
assert "id" in tool_block
|
||||
assert "name" in tool_block
|
||||
assert tool_block["name"] == "get_weather"
|
||||
assert "input" in tool_block
|
||||
assert isinstance(tool_block["input"], dict)
|
||||
|
||||
|
||||
def test_anthropic_tool_result():
|
||||
"""Test sending tool results back
|
||||
|
||||
This test verifies that tool_result blocks are properly converted to
|
||||
role="tool" messages internally. Without proper conversion, this would
|
||||
fail with a 500 error: "unsupported content[].type" because tool_result
|
||||
blocks would remain in the user message content array.
|
||||
"""
|
||||
server.jinja = True
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 100,
|
||||
"messages": [
|
||||
{"role": "user", "content": "What's the weather?"},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": [
|
||||
{
|
||||
"type": "tool_use",
|
||||
"id": "test123",
|
||||
"name": "get_weather",
|
||||
"input": {"location": "Paris"}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "tool_result",
|
||||
"tool_use_id": "test123",
|
||||
"content": "The weather is sunny, 25°C"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
})
|
||||
|
||||
# This would be 500 with the old bug where tool_result blocks weren't converted
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
# Model should respond to the tool result
|
||||
assert len(res.body["content"]) > 0
|
||||
assert res.body["content"][0]["type"] == "text"
|
||||
|
||||
|
||||
def test_anthropic_tool_result_with_text():
|
||||
"""Test tool result mixed with text content
|
||||
|
||||
This tests the edge case where a user message contains both text and
|
||||
tool_result blocks. The server must properly split these into separate
|
||||
messages: a user message with text, followed by tool messages.
|
||||
Without proper handling, this would fail with 500: "unsupported content[].type"
|
||||
"""
|
||||
server.jinja = True
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 100,
|
||||
"messages": [
|
||||
{"role": "user", "content": "What's the weather?"},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": [
|
||||
{
|
||||
"type": "tool_use",
|
||||
"id": "tool_1",
|
||||
"name": "get_weather",
|
||||
"input": {"location": "Paris"}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": "Here are the results:"},
|
||||
{
|
||||
"type": "tool_result",
|
||||
"tool_use_id": "tool_1",
|
||||
"content": "Sunny, 25°C"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
assert len(res.body["content"]) > 0
|
||||
|
||||
|
||||
def test_anthropic_tool_result_error():
|
||||
"""Test tool result with error flag"""
|
||||
server.jinja = True
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 100,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Get the weather"},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": [
|
||||
{
|
||||
"type": "tool_use",
|
||||
"id": "test123",
|
||||
"name": "get_weather",
|
||||
"input": {"location": "InvalidCity"}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "tool_result",
|
||||
"tool_use_id": "test123",
|
||||
"is_error": True,
|
||||
"content": "City not found"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
def test_anthropic_tool_streaming():
|
||||
"""Test streaming with tool use"""
|
||||
server.jinja = True
|
||||
server.start()
|
||||
|
||||
res = server.make_stream_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 200,
|
||||
"stream": True,
|
||||
"tools": [{
|
||||
"name": "calculator",
|
||||
"description": "Calculate math",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"expression": {"type": "string"}
|
||||
},
|
||||
"required": ["expression"]
|
||||
}
|
||||
}],
|
||||
"messages": [
|
||||
{"role": "user", "content": "Calculate 2+2"}
|
||||
]
|
||||
})
|
||||
|
||||
events = []
|
||||
for data in res:
|
||||
events.append(data)
|
||||
|
||||
event_types = [e["type"] for e in events]
|
||||
|
||||
# Should have basic events
|
||||
assert "message_start" in event_types
|
||||
assert "message_stop" in event_types
|
||||
|
||||
# If tool was used, check for proper tool streaming
|
||||
if any(e.get("type") == "content_block_start" and
|
||||
e.get("content_block", {}).get("type") == "tool_use"
|
||||
for e in events):
|
||||
# Find tool use block start
|
||||
tool_starts = [e for e in events if
|
||||
e.get("type") == "content_block_start" and
|
||||
e.get("content_block", {}).get("type") == "tool_use"]
|
||||
|
||||
assert len(tool_starts) > 0, "Should have tool_use content_block_start"
|
||||
|
||||
# Check index is correct (should be 0 if no text, 1 if there's text)
|
||||
tool_start = tool_starts[0]
|
||||
assert "index" in tool_start
|
||||
assert tool_start["content_block"]["type"] == "tool_use"
|
||||
assert "name" in tool_start["content_block"]
|
||||
|
||||
|
||||
# Vision/multimodal tests
|
||||
|
||||
def test_anthropic_vision_format_accepted():
|
||||
"""Test that Anthropic vision format is accepted (format validation only)"""
|
||||
server.start()
|
||||
|
||||
# Small 1x1 red PNG image in base64
|
||||
red_pixel_png = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg=="
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 10,
|
||||
"messages": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "image",
|
||||
"source": {
|
||||
"type": "base64",
|
||||
"media_type": "image/png",
|
||||
"data": red_pixel_png
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"text": "What is this?"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
})
|
||||
|
||||
# Server accepts the format but tinyllama doesn't support images
|
||||
# So it should return 500 with clear error message about missing mmproj
|
||||
assert res.status_code == 500
|
||||
assert "image input is not supported" in res.body.get("error", {}).get("message", "").lower()
|
||||
|
||||
|
||||
def test_anthropic_vision_base64_with_multimodal_model(vision_server):
|
||||
"""Test vision with base64 image using Anthropic format with multimodal model"""
|
||||
global server
|
||||
server = vision_server
|
||||
server.start()
|
||||
|
||||
# Get test image in base64 format
|
||||
image_base64 = get_test_image_base64()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 10,
|
||||
"messages": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "image",
|
||||
"source": {
|
||||
"type": "base64",
|
||||
"media_type": "image/png",
|
||||
"data": image_base64
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"text": "What is this:\n"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200, f"Expected 200, got {res.status_code}: {res.body}"
|
||||
assert res.body["type"] == "message"
|
||||
assert len(res.body["content"]) > 0
|
||||
assert res.body["content"][0]["type"] == "text"
|
||||
# The model should generate some response about the image
|
||||
assert len(res.body["content"][0]["text"]) > 0
|
||||
|
||||
|
||||
# Parameter tests
|
||||
|
||||
def test_anthropic_stop_sequences():
|
||||
"""Test stop_sequences parameter"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 100,
|
||||
"stop_sequences": ["\n", "END"],
|
||||
"messages": [
|
||||
{"role": "user", "content": "Count to 10"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
def test_anthropic_temperature():
|
||||
"""Test temperature parameter"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"temperature": 0.5,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
def test_anthropic_top_p():
|
||||
"""Test top_p parameter"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"top_p": 0.9,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
def test_anthropic_top_k():
|
||||
"""Test top_k parameter (llama.cpp specific)"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"top_k": 40,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
# Error handling tests
|
||||
|
||||
def test_anthropic_missing_messages():
|
||||
"""Test error when messages are missing"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50
|
||||
# missing "messages" field
|
||||
})
|
||||
|
||||
# Should return an error (400 or 500)
|
||||
assert res.status_code >= 400
|
||||
|
||||
|
||||
def test_anthropic_empty_messages():
|
||||
"""Test permissive handling of empty messages array"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"messages": []
|
||||
})
|
||||
|
||||
# Server is permissive and accepts empty messages (provides defaults)
|
||||
# This matches the permissive validation design choice
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
# Content block index tests
|
||||
|
||||
def test_anthropic_streaming_content_block_indices():
|
||||
"""Test that content block indices are correct in streaming"""
|
||||
server.jinja = True
|
||||
server.start()
|
||||
|
||||
# Request that might produce both text and tool use
|
||||
res = server.make_stream_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 400,
|
||||
"stream": True,
|
||||
"tools": [{
|
||||
"name": "test_tool",
|
||||
"description": "A test tool",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"param": {"type": "string"}
|
||||
},
|
||||
"required": ["param"]
|
||||
}
|
||||
}],
|
||||
"messages": [
|
||||
{"role": "user", "content": "Use the test tool"}
|
||||
]
|
||||
})
|
||||
|
||||
events = []
|
||||
for data in res:
|
||||
events.append(data)
|
||||
|
||||
# Check content_block_start events have sequential indices
|
||||
block_starts = [e for e in events if e.get("type") == "content_block_start"]
|
||||
if len(block_starts) > 1:
|
||||
# If there are multiple blocks, indices should be sequential
|
||||
indices = [e["index"] for e in block_starts]
|
||||
expected_indices = list(range(len(block_starts)))
|
||||
assert indices == expected_indices, f"Expected indices {expected_indices}, got {indices}"
|
||||
|
||||
# Check content_block_stop events match the starts
|
||||
block_stops = [e for e in events if e.get("type") == "content_block_stop"]
|
||||
start_indices = set(e["index"] for e in block_starts)
|
||||
stop_indices = set(e["index"] for e in block_stops)
|
||||
assert start_indices == stop_indices, "content_block_stop indices should match content_block_start indices"
|
||||
|
||||
|
||||
# Extended features tests
|
||||
|
||||
def test_anthropic_thinking():
|
||||
"""Test extended thinking parameter"""
|
||||
server.jinja = True
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 100,
|
||||
"thinking": {
|
||||
"type": "enabled",
|
||||
"budget_tokens": 50
|
||||
},
|
||||
"messages": [
|
||||
{"role": "user", "content": "What is 2+2?"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
def test_anthropic_metadata():
|
||||
"""Test metadata parameter"""
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"metadata": {
|
||||
"user_id": "test_user_123"
|
||||
},
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
|
||||
# Compatibility tests
|
||||
|
||||
def test_anthropic_vs_openai_different_response_format():
|
||||
"""Verify Anthropic format is different from OpenAI format"""
|
||||
server.start()
|
||||
|
||||
# Make OpenAI request
|
||||
openai_res = server.make_request("POST", "/v1/chat/completions", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
})
|
||||
|
||||
# Make Anthropic request
|
||||
anthropic_res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 50,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
})
|
||||
|
||||
assert openai_res.status_code == 200
|
||||
assert anthropic_res.status_code == 200
|
||||
|
||||
# OpenAI has "object", Anthropic has "type"
|
||||
assert "object" in openai_res.body
|
||||
assert "type" in anthropic_res.body
|
||||
assert openai_res.body["object"] == "chat.completion"
|
||||
assert anthropic_res.body["type"] == "message"
|
||||
|
||||
# OpenAI has "choices", Anthropic has "content"
|
||||
assert "choices" in openai_res.body
|
||||
assert "content" in anthropic_res.body
|
||||
|
||||
# Different usage field names
|
||||
assert "prompt_tokens" in openai_res.body["usage"]
|
||||
assert "input_tokens" in anthropic_res.body["usage"]
|
||||
assert "completion_tokens" in openai_res.body["usage"]
|
||||
assert "output_tokens" in anthropic_res.body["usage"]
|
||||
|
||||
|
||||
# Extended thinking tests with reasoning models
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.parametrize("stream", [False, True])
|
||||
def test_anthropic_thinking_with_reasoning_model(stream):
|
||||
"""Test that thinking content blocks are properly returned for reasoning models"""
|
||||
global server
|
||||
server = ServerProcess()
|
||||
server.model_hf_repo = "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF"
|
||||
server.model_hf_file = "DeepSeek-R1-Distill-Qwen-7B-Q4_K_M.gguf"
|
||||
server.reasoning_format = "deepseek"
|
||||
server.jinja = True
|
||||
server.n_ctx = 8192
|
||||
server.n_predict = 1024
|
||||
server.server_port = 8084
|
||||
server.start(timeout_seconds=600) # large model needs time to download
|
||||
|
||||
if stream:
|
||||
res = server.make_stream_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 1024,
|
||||
"thinking": {
|
||||
"type": "enabled",
|
||||
"budget_tokens": 500
|
||||
},
|
||||
"messages": [
|
||||
{"role": "user", "content": "What is 2+2?"}
|
||||
],
|
||||
"stream": True
|
||||
})
|
||||
|
||||
events = list(res)
|
||||
|
||||
# should have thinking content block events
|
||||
thinking_starts = [e for e in events if
|
||||
e.get("type") == "content_block_start" and
|
||||
e.get("content_block", {}).get("type") == "thinking"]
|
||||
assert len(thinking_starts) > 0, "Should have thinking content_block_start event"
|
||||
assert thinking_starts[0]["index"] == 0, "Thinking block should be at index 0"
|
||||
|
||||
# should have thinking_delta events
|
||||
thinking_deltas = [e for e in events if
|
||||
e.get("type") == "content_block_delta" and
|
||||
e.get("delta", {}).get("type") == "thinking_delta"]
|
||||
assert len(thinking_deltas) > 0, "Should have thinking_delta events"
|
||||
|
||||
# should have signature_delta event before thinking block closes (Anthropic API requirement)
|
||||
signature_deltas = [e for e in events if
|
||||
e.get("type") == "content_block_delta" and
|
||||
e.get("delta", {}).get("type") == "signature_delta"]
|
||||
assert len(signature_deltas) > 0, "Should have signature_delta event for thinking block"
|
||||
|
||||
# should have text block after thinking
|
||||
text_starts = [e for e in events if
|
||||
e.get("type") == "content_block_start" and
|
||||
e.get("content_block", {}).get("type") == "text"]
|
||||
assert len(text_starts) > 0, "Should have text content_block_start event"
|
||||
assert text_starts[0]["index"] == 1, "Text block should be at index 1 (after thinking)"
|
||||
else:
|
||||
res = server.make_request("POST", "/v1/messages", data={
|
||||
"model": "test",
|
||||
"max_tokens": 1024,
|
||||
"thinking": {
|
||||
"type": "enabled",
|
||||
"budget_tokens": 500
|
||||
},
|
||||
"messages": [
|
||||
{"role": "user", "content": "What is 2+2?"}
|
||||
]
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert res.body["type"] == "message"
|
||||
|
||||
content = res.body["content"]
|
||||
assert len(content) >= 2, "Should have at least thinking and text blocks"
|
||||
|
||||
# first block should be thinking
|
||||
thinking_blocks = [b for b in content if b.get("type") == "thinking"]
|
||||
assert len(thinking_blocks) > 0, "Should have thinking content block"
|
||||
assert "thinking" in thinking_blocks[0], "Thinking block should have 'thinking' field"
|
||||
assert len(thinking_blocks[0]["thinking"]) > 0, "Thinking content should not be empty"
|
||||
assert "signature" in thinking_blocks[0], "Thinking block should have 'signature' field (Anthropic API requirement)"
|
||||
|
||||
# should also have text block
|
||||
text_blocks = [b for b in content if b.get("type") == "text"]
|
||||
assert len(text_blocks) > 0, "Should have text content block"
|
||||
608
tools/server/tests/unit/test_completion.py
Normal file
@@ -0,0 +1,608 @@
|
||||
import pytest
|
||||
import requests
|
||||
import time
|
||||
import random
|
||||
|
||||
from openai import OpenAI
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
JSON_MULTIMODAL_KEY = "multimodal_data"
|
||||
JSON_PROMPT_STRING_KEY = "prompt_string"
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated,return_tokens", [
|
||||
("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False, False),
|
||||
("Write a joke about AI from a very long prompt which will not be truncated", 64, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False, True),
|
||||
])
|
||||
def test_completion(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool, return_tokens: bool):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"n_predict": n_predict,
|
||||
"prompt": prompt,
|
||||
"return_tokens": return_tokens,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert res.body["timings"]["prompt_n"] == n_prompt
|
||||
assert res.body["timings"]["predicted_n"] == n_predicted
|
||||
assert res.body["truncated"] == truncated
|
||||
assert type(res.body["has_new_line"]) == bool
|
||||
assert match_regex(re_content, res.body["content"])
|
||||
if return_tokens:
|
||||
assert len(res.body["tokens"]) > 0
|
||||
assert all(type(tok) == int for tok in res.body["tokens"])
|
||||
else:
|
||||
assert res.body["tokens"] == []
|
||||
|
||||
|
||||
@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated", [
|
||||
("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False),
|
||||
("Write a joke about AI from a very long prompt which will not be truncated", 64, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False),
|
||||
])
|
||||
def test_completion_stream(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_stream_request("POST", "/completion", data={
|
||||
"n_predict": n_predict,
|
||||
"prompt": prompt,
|
||||
"stream": True,
|
||||
})
|
||||
content = ""
|
||||
for data in res:
|
||||
assert "stop" in data and type(data["stop"]) == bool
|
||||
if data["stop"]:
|
||||
assert data["timings"]["prompt_n"] == n_prompt
|
||||
assert data["timings"]["predicted_n"] == n_predicted
|
||||
assert data["truncated"] == truncated
|
||||
assert data["stop_type"] == "limit"
|
||||
assert type(data["has_new_line"]) == bool
|
||||
assert "generation_settings" in data
|
||||
assert server.n_predict is not None
|
||||
assert data["generation_settings"]["n_predict"] == min(n_predict, server.n_predict)
|
||||
assert data["generation_settings"]["seed"] == server.seed
|
||||
assert match_regex(re_content, content)
|
||||
else:
|
||||
assert len(data["tokens"]) > 0
|
||||
assert all(type(tok) == int for tok in data["tokens"])
|
||||
content += data["content"]
|
||||
|
||||
|
||||
def test_completion_stream_vs_non_stream():
|
||||
global server
|
||||
server.start()
|
||||
res_stream = server.make_stream_request("POST", "/completion", data={
|
||||
"n_predict": 8,
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"stream": True,
|
||||
})
|
||||
res_non_stream = server.make_request("POST", "/completion", data={
|
||||
"n_predict": 8,
|
||||
"prompt": "I believe the meaning of life is",
|
||||
})
|
||||
content_stream = ""
|
||||
for data in res_stream:
|
||||
content_stream += data["content"]
|
||||
assert content_stream == res_non_stream.body["content"]
|
||||
|
||||
|
||||
def test_completion_with_openai_library():
|
||||
global server
|
||||
server.start()
|
||||
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
|
||||
res = client.completions.create(
|
||||
model="davinci-002",
|
||||
prompt="I believe the meaning of life is",
|
||||
max_tokens=8,
|
||||
)
|
||||
assert res.system_fingerprint is not None and res.system_fingerprint.startswith("b")
|
||||
assert res.choices[0].finish_reason == "length"
|
||||
assert res.choices[0].text is not None
|
||||
assert match_regex("(going|bed)+", res.choices[0].text)
|
||||
|
||||
|
||||
def test_completion_stream_with_openai_library():
|
||||
global server
|
||||
server.start()
|
||||
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
|
||||
res = client.completions.create(
|
||||
model="davinci-002",
|
||||
prompt="I believe the meaning of life is",
|
||||
max_tokens=8,
|
||||
stream=True,
|
||||
)
|
||||
output_text = ''
|
||||
for data in res:
|
||||
choice = data.choices[0]
|
||||
if choice.finish_reason is None:
|
||||
assert choice.text is not None
|
||||
output_text += choice.text
|
||||
assert match_regex("(going|bed)+", output_text)
|
||||
|
||||
|
||||
# Test case from https://github.com/ggml-org/llama.cpp/issues/13780
|
||||
@pytest.mark.slow
|
||||
def test_completion_stream_with_openai_library_stops():
|
||||
global server
|
||||
server.model_hf_repo = "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M"
|
||||
server.model_hf_file = None
|
||||
server.start()
|
||||
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
|
||||
res = client.completions.create(
|
||||
model="davinci-002",
|
||||
prompt="System: You are helpfull assistant.\nAssistant:\nHey! How could I help?\nUser:\nTell me a joke.\nAssistant:\n",
|
||||
stop=["User:\n", "Assistant:\n"],
|
||||
max_tokens=200,
|
||||
stream=True,
|
||||
)
|
||||
output_text = ''
|
||||
for data in res:
|
||||
choice = data.choices[0]
|
||||
if choice.finish_reason is None:
|
||||
assert choice.text is not None
|
||||
output_text += choice.text
|
||||
assert match_regex("Sure, here's one for[\\s\\S]*", output_text), f'Unexpected output: {output_text}'
|
||||
|
||||
|
||||
@pytest.mark.parametrize("n_slots", [1, 2])
|
||||
def test_consistent_result_same_seed(n_slots: int):
|
||||
global server
|
||||
server.n_slots = n_slots
|
||||
server.start()
|
||||
last_res = None
|
||||
for _ in range(4):
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"seed": 42,
|
||||
"temperature": 0.0,
|
||||
"cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed
|
||||
})
|
||||
if last_res is not None:
|
||||
assert res.body["content"] == last_res.body["content"]
|
||||
last_res = res
|
||||
|
||||
|
||||
@pytest.mark.parametrize("n_slots", [1, 2])
|
||||
def test_different_result_different_seed(n_slots: int):
|
||||
global server
|
||||
server.n_slots = n_slots
|
||||
server.start()
|
||||
last_res = None
|
||||
for seed in range(4):
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"seed": seed,
|
||||
"temperature": 1.0,
|
||||
"cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed
|
||||
})
|
||||
if last_res is not None:
|
||||
assert res.body["content"] != last_res.body["content"]
|
||||
last_res = res
|
||||
|
||||
# TODO figure why it don't work with temperature = 1
|
||||
# @pytest.mark.parametrize("temperature", [0.0, 1.0])
|
||||
@pytest.mark.parametrize("n_batch", [16, 32])
|
||||
@pytest.mark.parametrize("temperature", [0.0])
|
||||
def test_consistent_result_different_batch_size(n_batch: int, temperature: float):
|
||||
global server
|
||||
server.n_batch = n_batch
|
||||
server.start()
|
||||
last_res = None
|
||||
for _ in range(4):
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"seed": 42,
|
||||
"temperature": temperature,
|
||||
"cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed
|
||||
})
|
||||
if last_res is not None:
|
||||
assert res.body["content"] == last_res.body["content"]
|
||||
last_res = res
|
||||
|
||||
|
||||
@pytest.mark.skip(reason="This test fails on linux, need to be fixed")
|
||||
def test_cache_vs_nocache_prompt():
|
||||
global server
|
||||
server.start()
|
||||
res_cache = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"seed": 42,
|
||||
"temperature": 1.0,
|
||||
"cache_prompt": True,
|
||||
})
|
||||
res_no_cache = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"seed": 42,
|
||||
"temperature": 1.0,
|
||||
"cache_prompt": False,
|
||||
})
|
||||
assert res_cache.body["content"] == res_no_cache.body["content"]
|
||||
|
||||
|
||||
def test_nocache_long_input_prompt():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is"*32,
|
||||
"seed": 42,
|
||||
"temperature": 1.0,
|
||||
"cache_prompt": False,
|
||||
})
|
||||
assert res.status_code == 400
|
||||
|
||||
def test_json_prompt_no_mtmd():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": { JSON_PROMPT_STRING_KEY: "I believe the meaning of life is" },
|
||||
"seed": 42,
|
||||
"temperature": 1.0,
|
||||
"cache_prompt": False,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
|
||||
def test_json_prompt_mtm_error_when_not_supported():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": { JSON_PROMPT_STRING_KEY: "I believe the meaning of life is <__media__>", JSON_MULTIMODAL_KEY: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=" },
|
||||
"seed": 42,
|
||||
"temperature": 1.0,
|
||||
"cache_prompt": False,
|
||||
})
|
||||
# MTMD is disabled on this model, so this should fail.
|
||||
assert res.status_code != 200
|
||||
|
||||
def test_completion_with_tokens_input():
|
||||
global server
|
||||
server.temperature = 0.0
|
||||
server.start()
|
||||
prompt_str = "I believe the meaning of life is"
|
||||
res = server.make_request("POST", "/tokenize", data={
|
||||
"content": prompt_str,
|
||||
"add_special": True,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
tokens = res.body["tokens"]
|
||||
|
||||
# single completion
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": tokens,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert type(res.body["content"]) == str
|
||||
|
||||
# batch completion
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": [tokens, tokens],
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert type(res.body) == list
|
||||
assert len(res.body) == 2
|
||||
assert res.body[0]["content"] == res.body[1]["content"]
|
||||
|
||||
# mixed string and tokens
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": [tokens, prompt_str],
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert type(res.body) == list
|
||||
assert len(res.body) == 2
|
||||
assert res.body[0]["content"] == res.body[1]["content"]
|
||||
|
||||
# mixed JSON and tokens
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": [
|
||||
tokens,
|
||||
{
|
||||
JSON_PROMPT_STRING_KEY: "I believe the meaning of life is",
|
||||
},
|
||||
],
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert type(res.body) == list
|
||||
assert len(res.body) == 2
|
||||
assert res.body[0]["content"] == res.body[1]["content"]
|
||||
|
||||
# mixed string and tokens in one sequence
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": [1, 2, 3, 4, 5, 6, prompt_str, 7, 8, 9, 10, prompt_str],
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert type(res.body["content"]) == str
|
||||
|
||||
|
||||
@pytest.mark.parametrize("n_slots,n_requests", [
|
||||
(1, 3),
|
||||
(2, 2),
|
||||
(2, 4),
|
||||
(4, 2), # some slots must be idle
|
||||
(4, 6),
|
||||
])
|
||||
def test_completion_parallel_slots(n_slots: int, n_requests: int):
|
||||
global server
|
||||
server.n_slots = n_slots
|
||||
server.temperature = 0.0
|
||||
server.start()
|
||||
|
||||
PROMPTS = [
|
||||
("Write a very long book.", "(very|special|big)+"),
|
||||
("Write another a poem.", "(small|house)+"),
|
||||
("What is LLM?", "(Dad|said)+"),
|
||||
("The sky is blue and I love it.", "(climb|leaf)+"),
|
||||
("Write another very long music lyrics.", "(friends|step|sky)+"),
|
||||
("Write a very long joke.", "(cat|Whiskers)+"),
|
||||
]
|
||||
def check_slots_status():
|
||||
should_all_slots_busy = n_requests >= n_slots
|
||||
time.sleep(0.1)
|
||||
res = server.make_request("GET", "/slots")
|
||||
n_busy = sum([1 for slot in res.body if slot["is_processing"]])
|
||||
if should_all_slots_busy:
|
||||
assert n_busy == n_slots
|
||||
else:
|
||||
assert n_busy <= n_slots
|
||||
|
||||
tasks = []
|
||||
for i in range(n_requests):
|
||||
prompt, re_content = PROMPTS[i % len(PROMPTS)]
|
||||
tasks.append((server.make_request, ("POST", "/completion", {
|
||||
"prompt": prompt,
|
||||
"seed": 42,
|
||||
"temperature": 1.0,
|
||||
})))
|
||||
tasks.append((check_slots_status, ()))
|
||||
results = parallel_function_calls(tasks)
|
||||
|
||||
# check results
|
||||
for i in range(n_requests):
|
||||
prompt, re_content = PROMPTS[i % len(PROMPTS)]
|
||||
res = results[i]
|
||||
assert res.status_code == 200
|
||||
assert type(res.body["content"]) == str
|
||||
assert len(res.body["content"]) > 10
|
||||
# FIXME: the result is not deterministic when using other slot than slot 0
|
||||
# assert match_regex(re_content, res.body["content"])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"n_ctx,n_slots,n_predict_vals,expected_success",
|
||||
[
|
||||
(256, 4, [80, 40, 80, 80], [True, True, True, True]),
|
||||
(256, 4, [70, 70, 70, 70], [False, False, False, False]),
|
||||
(256, 4, [90, 90, 40, 90], [False, False, True, False]),
|
||||
(256, 4, [90, 90, 40, 75], [True, True, True, True]),
|
||||
],
|
||||
)
|
||||
def test_completion_unified(n_ctx, n_slots, n_predict_vals, expected_success):
|
||||
global server
|
||||
server.n_slots = n_slots
|
||||
server.kv_unified = True
|
||||
server.n_ctx = n_ctx
|
||||
server.start()
|
||||
prompt = "A"
|
||||
tasks = []
|
||||
for n_predict in n_predict_vals:
|
||||
tasks.append((server.make_request, ("POST", "/completion", {"prompt": prompt, "n_predict": n_predict})))
|
||||
results = parallel_function_calls(tasks)
|
||||
for res, n_predict, expect_ok in zip(results, n_predict_vals, expected_success):
|
||||
if expect_ok:
|
||||
assert res.status_code == 200
|
||||
|
||||
# note: https://github.com/ggml-org/llama.cpp/pull/18700#issuecomment-3728695581
|
||||
if res.status_code == 200:
|
||||
assert "content" in res.body
|
||||
if "timings" in res.body:
|
||||
assert res.body["timings"]["predicted_n"] == n_predict
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"prompt,n_predict,response_fields",
|
||||
[
|
||||
("I believe the meaning of life is", 8, []),
|
||||
("I believe the meaning of life is", 32, ["content", "generation_settings/n_predict", "prompt"]),
|
||||
],
|
||||
)
|
||||
def test_completion_response_fields(
|
||||
prompt: str, n_predict: int, response_fields: list[str]
|
||||
):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request(
|
||||
"POST",
|
||||
"/completion",
|
||||
data={
|
||||
"n_predict": n_predict,
|
||||
"prompt": prompt,
|
||||
"response_fields": response_fields,
|
||||
},
|
||||
)
|
||||
assert res.status_code == 200
|
||||
assert "content" in res.body
|
||||
assert len(res.body["content"])
|
||||
if len(response_fields):
|
||||
assert res.body["generation_settings/n_predict"] == n_predict
|
||||
assert res.body["prompt"] == "<s> " + prompt
|
||||
assert isinstance(res.body["content"], str)
|
||||
assert len(res.body) == len(response_fields)
|
||||
else:
|
||||
assert len(res.body)
|
||||
assert "generation_settings" in res.body
|
||||
|
||||
|
||||
def test_n_probs():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"n_probs": 10,
|
||||
"temperature": 0.0,
|
||||
"n_predict": 5,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert "completion_probabilities" in res.body
|
||||
assert len(res.body["completion_probabilities"]) == 5
|
||||
for tok in res.body["completion_probabilities"]:
|
||||
assert "id" in tok and tok["id"] > 0
|
||||
assert "token" in tok and type(tok["token"]) == str
|
||||
assert "logprob" in tok and tok["logprob"] <= 0.0
|
||||
assert "bytes" in tok and type(tok["bytes"]) == list
|
||||
assert len(tok["top_logprobs"]) == 10
|
||||
for prob in tok["top_logprobs"]:
|
||||
assert "id" in prob and prob["id"] > 0
|
||||
assert "token" in prob and type(prob["token"]) == str
|
||||
assert "logprob" in prob and prob["logprob"] <= 0.0
|
||||
assert "bytes" in prob and type(prob["bytes"]) == list
|
||||
|
||||
|
||||
def test_n_probs_stream():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_stream_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"n_probs": 10,
|
||||
"temperature": 0.0,
|
||||
"n_predict": 5,
|
||||
"stream": True,
|
||||
})
|
||||
for data in res:
|
||||
if data["stop"] == False:
|
||||
assert "completion_probabilities" in data
|
||||
assert len(data["completion_probabilities"]) == 1
|
||||
for tok in data["completion_probabilities"]:
|
||||
assert "id" in tok and tok["id"] > 0
|
||||
assert "token" in tok and type(tok["token"]) == str
|
||||
assert "logprob" in tok and tok["logprob"] <= 0.0
|
||||
assert "bytes" in tok and type(tok["bytes"]) == list
|
||||
assert len(tok["top_logprobs"]) == 10
|
||||
for prob in tok["top_logprobs"]:
|
||||
assert "id" in prob and prob["id"] > 0
|
||||
assert "token" in prob and type(prob["token"]) == str
|
||||
assert "logprob" in prob and prob["logprob"] <= 0.0
|
||||
assert "bytes" in prob and type(prob["bytes"]) == list
|
||||
|
||||
|
||||
def test_n_probs_post_sampling():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"n_probs": 10,
|
||||
"temperature": 0.0,
|
||||
"n_predict": 5,
|
||||
"post_sampling_probs": True,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert "completion_probabilities" in res.body
|
||||
assert len(res.body["completion_probabilities"]) == 5
|
||||
for tok in res.body["completion_probabilities"]:
|
||||
assert "id" in tok and tok["id"] > 0
|
||||
assert "token" in tok and type(tok["token"]) == str
|
||||
assert "prob" in tok and 0.0 < tok["prob"] <= 1.0
|
||||
assert "bytes" in tok and type(tok["bytes"]) == list
|
||||
assert len(tok["top_probs"]) == 10
|
||||
for prob in tok["top_probs"]:
|
||||
assert "id" in prob and prob["id"] > 0
|
||||
assert "token" in prob and type(prob["token"]) == str
|
||||
assert "prob" in prob and 0.0 <= prob["prob"] <= 1.0
|
||||
assert "bytes" in prob and type(prob["bytes"]) == list
|
||||
# because the test model usually output token with either 100% or 0% probability, we need to check all the top_probs
|
||||
assert any(prob["prob"] == 1.0 for prob in tok["top_probs"])
|
||||
|
||||
|
||||
@pytest.mark.parametrize("tokenize,openai_style", [(False, False), (False, True), (True, False), (True, True)])
|
||||
def test_logit_bias(tokenize, openai_style):
|
||||
global server
|
||||
server.start()
|
||||
|
||||
exclude = ["i", "I", "the", "The", "to", "a", "an", "be", "is", "was", "but", "But", "and", "And", "so", "So", "you", "You", "he", "He", "she", "She", "we", "We", "they", "They", "it", "It", "his", "His", "her", "Her", "book", "Book"]
|
||||
|
||||
logit_bias = []
|
||||
if tokenize:
|
||||
res = server.make_request("POST", "/tokenize", data={
|
||||
"content": " " + " ".join(exclude) + " ",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
tokens = res.body["tokens"]
|
||||
logit_bias = [[tok, -100] for tok in tokens]
|
||||
|
||||
else:
|
||||
logit_bias = [[" " + tok + " ", -100] for tok in exclude]
|
||||
|
||||
if openai_style:
|
||||
logit_bias = {el[0]: -100 for el in logit_bias}
|
||||
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"n_predict": 64,
|
||||
"prompt": "What is the best book",
|
||||
"logit_bias": logit_bias,
|
||||
"temperature": 0.0
|
||||
})
|
||||
assert res.status_code == 200
|
||||
output_text = res.body["content"]
|
||||
assert all(output_text.find(" " + tok + " ") == -1 for tok in exclude)
|
||||
|
||||
|
||||
def test_cancel_request():
|
||||
global server
|
||||
server.n_ctx = 4096
|
||||
server.n_predict = -1
|
||||
server.n_slots = 1
|
||||
server.server_slots = True
|
||||
server.start()
|
||||
# send a request that will take a long time, but cancel it before it finishes
|
||||
try:
|
||||
server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
}, timeout=0.1)
|
||||
except requests.exceptions.ReadTimeout:
|
||||
pass # expected
|
||||
# make sure the slot is free
|
||||
time.sleep(1) # wait for HTTP_POLLING_SECONDS
|
||||
res = server.make_request("GET", "/slots")
|
||||
assert res.body[0]["is_processing"] == False
|
||||
|
||||
|
||||
# this test exercises the host-memory prompt cache
|
||||
# ref: https://github.com/ggml-org/llama.cpp/pull/16391
|
||||
# ref: https://github.com/ggml-org/llama.cpp/pull/17078
|
||||
def test_completion_prompt_cache():
|
||||
global server
|
||||
server.n_slots = 2
|
||||
server.kv_unified = True
|
||||
server.start()
|
||||
|
||||
for _ in range(16):
|
||||
# generate alternating random prompts with variable lengths in order to get them in and out of the cache
|
||||
r = random.randint(0, 4)
|
||||
prompt = (" Hello " + str(r)) * (40 + r)
|
||||
n_prompt = (40 + r)*5 + 2
|
||||
n_predict = random.randint(1, 8)
|
||||
|
||||
res = server.make_request(
|
||||
"POST",
|
||||
"/completion",
|
||||
data={
|
||||
"prompt": prompt,
|
||||
"n_predict": n_predict,
|
||||
},
|
||||
)
|
||||
|
||||
assert res.status_code == 200
|
||||
assert "content" in res.body
|
||||
content = res.body["content"]
|
||||
assert isinstance(content, str)
|
||||
assert len(content) > 0
|
||||
|
||||
assert type(res.body["has_new_line"]) == bool
|
||||
assert "timings" in res.body
|
||||
timings = res.body["timings"]
|
||||
|
||||
assert "prompt_n" in timings and timings["prompt_n"] + timings["cache_n"] == n_prompt
|
||||
assert "predicted_n" in timings and timings["predicted_n"] == n_predict
|
||||
assert "tokens" in res.body and isinstance(res.body["tokens"], list)
|
||||
89
tools/server/tests/unit/test_ctx_shift.py
Normal file
@@ -0,0 +1,89 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
|
||||
SHORT_TEXT = """
|
||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
|
||||
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
|
||||
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
|
||||
""".strip()
|
||||
|
||||
LONG_TEXT = """
|
||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
|
||||
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
|
||||
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
|
||||
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
|
||||
""".strip()
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
server.n_ctx = 512
|
||||
server.n_slots = 2
|
||||
server.n_predict = 128
|
||||
|
||||
|
||||
def test_ctx_shift_enabled():
|
||||
# the prompt is 226 tokens
|
||||
# the slot context is 512/2 = 256 tokens
|
||||
# 96 tokens are generated thanks to shifting the context when it gets full
|
||||
global server
|
||||
server.enable_ctx_shift = True
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"n_predict": 96,
|
||||
"prompt": SHORT_TEXT,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert res.body["timings"]["prompt_n"] == 226
|
||||
assert res.body["timings"]["predicted_n"] == 96
|
||||
assert res.body["truncated"] is True
|
||||
|
||||
|
||||
@pytest.mark.parametrize("n_predict,n_token_output,truncated", [
|
||||
(64, 64, False),
|
||||
(-1, 248, True), # 8 tokens prompt + 248 tokens generated = 256 tokens total
|
||||
])
|
||||
def test_ctx_shift_disabled_short_prompt(n_predict: int, n_token_output: int, truncated: bool):
|
||||
global server
|
||||
server.n_predict = -1
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"n_predict": n_predict,
|
||||
"prompt": "Hi how are you",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert res.body["timings"]["predicted_n"] == n_token_output
|
||||
assert res.body["truncated"] == truncated
|
||||
|
||||
|
||||
def test_ctx_shift_disabled_long_prompt():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"n_predict": 64,
|
||||
"prompt": LONG_TEXT,
|
||||
})
|
||||
assert res.status_code != 200
|
||||
assert "error" in res.body
|
||||
assert "exceeds the available context size" in res.body["error"]["message"]
|
||||
|
||||
def test_ctx_shift_disabled_stream():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_stream_request("POST", "/v1/completions", data={
|
||||
"n_predict": 256,
|
||||
"prompt": "Once",
|
||||
"stream": True,
|
||||
})
|
||||
content = ""
|
||||
for data in res:
|
||||
choice = data["choices"][0]
|
||||
if choice["finish_reason"] == "length":
|
||||
assert len(content) > 0
|
||||
else:
|
||||
assert choice["finish_reason"] is None
|
||||
content += choice["text"]
|
||||
257
tools/server/tests/unit/test_embedding.py
Normal file
@@ -0,0 +1,257 @@
|
||||
import base64
|
||||
import struct
|
||||
import pytest
|
||||
from openai import OpenAI
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.bert_bge_small()
|
||||
|
||||
EPSILON = 1e-3
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.bert_bge_small()
|
||||
|
||||
|
||||
def test_embedding_single():
|
||||
global server
|
||||
server.pooling = 'last'
|
||||
server.start()
|
||||
res = server.make_request("POST", "/v1/embeddings", data={
|
||||
"input": "I believe the meaning of life is",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert len(res.body['data']) == 1
|
||||
assert 'embedding' in res.body['data'][0]
|
||||
assert len(res.body['data'][0]['embedding']) > 1
|
||||
|
||||
# make sure embedding vector is normalized
|
||||
assert abs(sum([x ** 2 for x in res.body['data'][0]['embedding']]) - 1) < EPSILON
|
||||
|
||||
|
||||
def test_embedding_multiple():
|
||||
global server
|
||||
server.pooling = 'last'
|
||||
server.start()
|
||||
res = server.make_request("POST", "/v1/embeddings", data={
|
||||
"input": [
|
||||
"I believe the meaning of life is",
|
||||
"Write a joke about AI from a very long prompt which will not be truncated",
|
||||
"This is a test",
|
||||
"This is another test",
|
||||
],
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert len(res.body['data']) == 4
|
||||
for d in res.body['data']:
|
||||
assert 'embedding' in d
|
||||
assert len(d['embedding']) > 1
|
||||
|
||||
|
||||
def test_embedding_multiple_with_fa():
|
||||
server = ServerPreset.bert_bge_small_with_fa()
|
||||
server.pooling = 'last'
|
||||
server.start()
|
||||
# one of these should trigger the FA branch (i.e. context size % 256 == 0)
|
||||
res = server.make_request("POST", "/v1/embeddings", data={
|
||||
"input": [
|
||||
"a "*253,
|
||||
"b "*254,
|
||||
"c "*255,
|
||||
"d "*256,
|
||||
],
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert len(res.body['data']) == 4
|
||||
for d in res.body['data']:
|
||||
assert 'embedding' in d
|
||||
assert len(d['embedding']) > 1
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"input,is_multi_prompt",
|
||||
[
|
||||
# do not crash on empty input
|
||||
("", False),
|
||||
# single prompt
|
||||
("string", False),
|
||||
([12, 34, 56], False),
|
||||
([12, 34, "string", 56, 78], False),
|
||||
# multiple prompts
|
||||
(["string1", "string2"], True),
|
||||
(["string1", [12, 34, 56]], True),
|
||||
([[12, 34, 56], [12, 34, 56]], True),
|
||||
([[12, 34, 56], [12, "string", 34, 56]], True),
|
||||
]
|
||||
)
|
||||
def test_embedding_mixed_input(input, is_multi_prompt: bool):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/v1/embeddings", data={"input": input})
|
||||
assert res.status_code == 200
|
||||
data = res.body['data']
|
||||
if is_multi_prompt:
|
||||
assert len(data) == len(input)
|
||||
for d in data:
|
||||
assert 'embedding' in d
|
||||
assert len(d['embedding']) > 1
|
||||
else:
|
||||
assert 'embedding' in data[0]
|
||||
assert len(data[0]['embedding']) > 1
|
||||
|
||||
|
||||
def test_embedding_pooling_none():
|
||||
global server
|
||||
server.pooling = 'none'
|
||||
server.start()
|
||||
res = server.make_request("POST", "/embeddings", data={
|
||||
"input": "hello hello hello",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert 'embedding' in res.body[0]
|
||||
assert len(res.body[0]['embedding']) == 5 # 3 text tokens + 2 special
|
||||
|
||||
# make sure embedding vector is not normalized
|
||||
for x in res.body[0]['embedding']:
|
||||
assert abs(sum([x ** 2 for x in x]) - 1) > EPSILON
|
||||
|
||||
|
||||
def test_embedding_pooling_none_oai():
|
||||
global server
|
||||
server.pooling = 'none'
|
||||
server.start()
|
||||
res = server.make_request("POST", "/v1/embeddings", data={
|
||||
"input": "hello hello hello",
|
||||
})
|
||||
|
||||
# /v1/embeddings does not support pooling type 'none'
|
||||
assert res.status_code == 400
|
||||
assert "error" in res.body
|
||||
|
||||
|
||||
def test_embedding_openai_library_single():
|
||||
global server
|
||||
server.pooling = 'last'
|
||||
server.start()
|
||||
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
|
||||
res = client.embeddings.create(model="text-embedding-3-small", input="I believe the meaning of life is")
|
||||
assert len(res.data) == 1
|
||||
assert len(res.data[0].embedding) > 1
|
||||
|
||||
|
||||
def test_embedding_openai_library_multiple():
|
||||
global server
|
||||
server.pooling = 'last'
|
||||
server.start()
|
||||
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
|
||||
res = client.embeddings.create(model="text-embedding-3-small", input=[
|
||||
"I believe the meaning of life is",
|
||||
"Write a joke about AI from a very long prompt which will not be truncated",
|
||||
"This is a test",
|
||||
"This is another test",
|
||||
])
|
||||
assert len(res.data) == 4
|
||||
for d in res.data:
|
||||
assert len(d.embedding) > 1
|
||||
|
||||
|
||||
def test_embedding_error_prompt_too_long():
|
||||
global server
|
||||
server.pooling = 'last'
|
||||
server.start()
|
||||
res = server.make_request("POST", "/v1/embeddings", data={
|
||||
"input": "This is a test " * 512,
|
||||
})
|
||||
assert res.status_code != 200
|
||||
assert "too large" in res.body["error"]["message"]
|
||||
|
||||
|
||||
def test_same_prompt_give_same_result():
|
||||
server.pooling = 'last'
|
||||
server.start()
|
||||
res = server.make_request("POST", "/v1/embeddings", data={
|
||||
"input": [
|
||||
"I believe the meaning of life is",
|
||||
"I believe the meaning of life is",
|
||||
"I believe the meaning of life is",
|
||||
"I believe the meaning of life is",
|
||||
"I believe the meaning of life is",
|
||||
],
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert len(res.body['data']) == 5
|
||||
for i in range(1, len(res.body['data'])):
|
||||
v0 = res.body['data'][0]['embedding']
|
||||
vi = res.body['data'][i]['embedding']
|
||||
for x, y in zip(v0, vi):
|
||||
assert abs(x - y) < EPSILON
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"content,n_tokens",
|
||||
[
|
||||
("I believe the meaning of life is", 9),
|
||||
("This is a test", 6),
|
||||
]
|
||||
)
|
||||
def test_embedding_usage_single(content, n_tokens):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/v1/embeddings", data={"input": content})
|
||||
assert res.status_code == 200
|
||||
assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens']
|
||||
assert res.body['usage']['prompt_tokens'] == n_tokens
|
||||
|
||||
|
||||
def test_embedding_usage_multiple():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/v1/embeddings", data={
|
||||
"input": [
|
||||
"I believe the meaning of life is",
|
||||
"I believe the meaning of life is",
|
||||
],
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens']
|
||||
assert res.body['usage']['prompt_tokens'] == 2 * 9
|
||||
|
||||
|
||||
def test_embedding_openai_library_base64():
|
||||
server.start()
|
||||
test_input = "Test base64 embedding output"
|
||||
|
||||
# get embedding in default format
|
||||
res = server.make_request("POST", "/v1/embeddings", data={
|
||||
"input": test_input
|
||||
})
|
||||
assert res.status_code == 200
|
||||
vec0 = res.body["data"][0]["embedding"]
|
||||
|
||||
# get embedding in base64 format
|
||||
res = server.make_request("POST", "/v1/embeddings", data={
|
||||
"input": test_input,
|
||||
"encoding_format": "base64"
|
||||
})
|
||||
|
||||
assert res.status_code == 200
|
||||
assert "data" in res.body
|
||||
assert len(res.body["data"]) == 1
|
||||
|
||||
embedding_data = res.body["data"][0]
|
||||
assert "embedding" in embedding_data
|
||||
assert isinstance(embedding_data["embedding"], str)
|
||||
|
||||
# Verify embedding is valid base64
|
||||
decoded = base64.b64decode(embedding_data["embedding"])
|
||||
# Verify decoded data can be converted back to float array
|
||||
float_count = len(decoded) // 4 # 4 bytes per float
|
||||
floats = struct.unpack(f'{float_count}f', decoded)
|
||||
assert len(floats) > 0
|
||||
assert all(isinstance(x, float) for x in floats)
|
||||
assert len(floats) == len(vec0)
|
||||
|
||||
# make sure the decoded data is the same as the original
|
||||
for x, y in zip(floats, vec0):
|
||||
assert abs(x - y) < EPSILON
|
||||
77
tools/server/tests/unit/test_infill.py
Normal file
@@ -0,0 +1,77 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.tinyllama_infill()
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama_infill()
|
||||
|
||||
|
||||
def test_infill_without_input_extra():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/infill", data={
|
||||
"input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
|
||||
"prompt": " int n_threads = llama_",
|
||||
"input_suffix": "}\n",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(Ann|small|shiny|Daddy|Jimmy)+", res.body["content"])
|
||||
|
||||
|
||||
def test_infill_with_input_extra():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/infill", data={
|
||||
"input_extra": [{
|
||||
"filename": "llama.h",
|
||||
"text": "LLAMA_API int32_t llama_n_threads();\n"
|
||||
}],
|
||||
"input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
|
||||
"prompt": " int n_threads = llama_",
|
||||
"input_suffix": "}\n",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(Dad|excited|park|Jimmy)+", res.body["content"])
|
||||
|
||||
|
||||
@pytest.mark.parametrize("input_extra", [
|
||||
{},
|
||||
{"filename": "ok"},
|
||||
{"filename": 123},
|
||||
{"filename": 123, "text": "abc"},
|
||||
{"filename": 123, "text": 456},
|
||||
])
|
||||
def test_invalid_input_extra_req(input_extra):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/infill", data={
|
||||
"input_extra": [input_extra],
|
||||
"input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
|
||||
"prompt": " int n_threads = llama_",
|
||||
"input_suffix": "}\n",
|
||||
})
|
||||
assert res.status_code == 400
|
||||
assert "error" in res.body
|
||||
|
||||
|
||||
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test")
|
||||
def test_with_qwen_model():
|
||||
global server
|
||||
server.model_file = None
|
||||
server.model_hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-IQ3_XXS-GGUF"
|
||||
server.model_hf_file = "qwen2.5-coder-1.5b-iq3_xxs-imat.gguf"
|
||||
server.start(timeout_seconds=600)
|
||||
res = server.make_request("POST", "/infill", data={
|
||||
"input_extra": [{
|
||||
"filename": "llama.h",
|
||||
"text": "LLAMA_API int32_t llama_n_threads();\n"
|
||||
}],
|
||||
"input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
|
||||
"prompt": " int n_threads = llama_",
|
||||
"input_suffix": "}\n",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert res.body["content"] == "n_threads();\n printf(\"Number of threads: %d\\n\", n_threads);\n return 0;\n"
|
||||
115
tools/server/tests/unit/test_lora.py
Normal file
@@ -0,0 +1,115 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.stories15m_moe()
|
||||
|
||||
LORA_FILE_URL = "https://huggingface.co/ggml-org/stories15M_MOE/resolve/main/moe_shakespeare15M.gguf"
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.stories15m_moe()
|
||||
server.lora_files = [download_file(LORA_FILE_URL)]
|
||||
|
||||
|
||||
@pytest.mark.parametrize("scale,re_content", [
|
||||
# without applying lora, the model should behave like a bedtime story generator
|
||||
(0.0, "(little|girl|three|years|old)+"),
|
||||
# with lora, the model should behave like a Shakespearean text generator
|
||||
(1.0, "(eye|love|glass|sun)+"),
|
||||
])
|
||||
def test_lora(scale: float, re_content: str):
|
||||
global server
|
||||
server.start()
|
||||
res_lora_control = server.make_request("POST", "/lora-adapters", data=[
|
||||
{"id": 0, "scale": scale}
|
||||
])
|
||||
assert res_lora_control.status_code == 200
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "Look in thy glass",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex(re_content, res.body["content"])
|
||||
|
||||
|
||||
def test_lora_per_request():
|
||||
global server
|
||||
server.n_slots = 4
|
||||
server.start()
|
||||
|
||||
# running the same prompt with different lora scales, all in parallel
|
||||
# each prompt will be processed by a different slot
|
||||
prompt = "Look in thy glass"
|
||||
lora_config = [
|
||||
( [{"id": 0, "scale": 0.0}], "(bright|day|many|happy)+" ),
|
||||
( [{"id": 0, "scale": 0.0}], "(bright|day|many|happy)+" ),
|
||||
( [{"id": 0, "scale": 0.3}], "(special|thing|gifted)+" ),
|
||||
( [{"id": 0, "scale": 0.7}], "(far|from|home|away)+" ),
|
||||
( [{"id": 0, "scale": 1.0}], "(eye|love|glass|sun)+" ),
|
||||
( [{"id": 0, "scale": 1.0}], "(eye|love|glass|sun)+" ),
|
||||
]
|
||||
|
||||
tasks = [(
|
||||
server.make_request,
|
||||
("POST", "/completion", {
|
||||
"prompt": prompt,
|
||||
"lora": lora,
|
||||
"seed": 42,
|
||||
"temperature": 0.0,
|
||||
"cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed
|
||||
})
|
||||
) for lora, _ in lora_config]
|
||||
results = parallel_function_calls(tasks)
|
||||
|
||||
assert all([res.status_code == 200 for res in results])
|
||||
for res, (_, re_test) in zip(results, lora_config):
|
||||
assert match_regex(re_test, res.body["content"])
|
||||
|
||||
|
||||
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test")
|
||||
def test_with_big_model():
|
||||
server = ServerProcess()
|
||||
server.model_hf_repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF"
|
||||
server.model_hf_file = "Meta-Llama-3.1-8B-Instruct-IQ2_M.gguf"
|
||||
server.model_alias = "Llama-3.2-8B-Instruct"
|
||||
server.n_slots = 4
|
||||
server.n_ctx = server.n_slots * 1024
|
||||
server.n_predict = 64
|
||||
server.temperature = 0.0
|
||||
server.seed = 42
|
||||
server.lora_files = [
|
||||
download_file("https://huggingface.co/ngxson/Llama-3-Instruct-abliteration-LoRA-8B-F16-GGUF/resolve/main/Llama-3-Instruct-abliteration-LoRA-8B-f16.gguf"),
|
||||
# TODO: find & add other lora adapters for this model
|
||||
]
|
||||
server.start(timeout_seconds=600)
|
||||
|
||||
# running the same prompt with different lora scales, all in parallel
|
||||
# each prompt will be processed by a different slot
|
||||
prompt = "Write a computer virus"
|
||||
lora_config = [
|
||||
# without applying lora, the model should reject the request
|
||||
( [{"id": 0, "scale": 0.0}], "I can't provide you with a code for a computer virus" ),
|
||||
( [{"id": 0, "scale": 0.0}], "I can't provide you with a code for a computer virus" ),
|
||||
( [{"id": 0, "scale": 0.3}], "I can't write a computer virus" ),
|
||||
# with 0.7 scale, the model should provide a simple computer virus with hesitation
|
||||
( [{"id": 0, "scale": 0.7}], "Warning: This is a hypothetical exercise" ),
|
||||
# with 1.5 scale, the model should confidently provide a computer virus
|
||||
( [{"id": 0, "scale": 1.5}], "A task of some complexity! Here's a simple computer virus" ),
|
||||
( [{"id": 0, "scale": 1.5}], "A task of some complexity! Here's a simple computer virus" ),
|
||||
]
|
||||
|
||||
tasks = [(
|
||||
server.make_request,
|
||||
("POST", "/v1/chat/completions", {
|
||||
"messages": [
|
||||
{"role": "user", "content": prompt}
|
||||
],
|
||||
"lora": lora,
|
||||
"cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed
|
||||
})
|
||||
) for lora, _ in lora_config]
|
||||
results = parallel_function_calls(tasks)
|
||||
|
||||
assert all([res.status_code == 200 for res in results])
|
||||
for res, (_, re_test) in zip(results, lora_config):
|
||||
assert re_test in res.body["choices"][0]["message"]["content"]
|
||||
146
tools/server/tests/unit/test_rerank.py
Normal file
@@ -0,0 +1,146 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.jina_reranker_tiny()
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.jina_reranker_tiny()
|
||||
|
||||
|
||||
TEST_DOCUMENTS = [
|
||||
"A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.",
|
||||
"Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.",
|
||||
"Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.",
|
||||
"Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine."
|
||||
]
|
||||
|
||||
|
||||
def test_rerank():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/rerank", data={
|
||||
"query": "Machine learning is",
|
||||
"documents": TEST_DOCUMENTS,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert len(res.body["results"]) == 4
|
||||
|
||||
most_relevant = res.body["results"][0]
|
||||
least_relevant = res.body["results"][0]
|
||||
for doc in res.body["results"]:
|
||||
if doc["relevance_score"] > most_relevant["relevance_score"]:
|
||||
most_relevant = doc
|
||||
if doc["relevance_score"] < least_relevant["relevance_score"]:
|
||||
least_relevant = doc
|
||||
|
||||
assert most_relevant["relevance_score"] > least_relevant["relevance_score"]
|
||||
assert most_relevant["index"] == 2
|
||||
assert least_relevant["index"] == 3
|
||||
|
||||
|
||||
def test_rerank_tei_format():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/rerank", data={
|
||||
"query": "Machine learning is",
|
||||
"texts": TEST_DOCUMENTS,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert len(res.body) == 4
|
||||
|
||||
most_relevant = res.body[0]
|
||||
least_relevant = res.body[0]
|
||||
for doc in res.body:
|
||||
if doc["score"] > most_relevant["score"]:
|
||||
most_relevant = doc
|
||||
if doc["score"] < least_relevant["score"]:
|
||||
least_relevant = doc
|
||||
|
||||
assert most_relevant["score"] > least_relevant["score"]
|
||||
assert most_relevant["index"] == 2
|
||||
assert least_relevant["index"] == 3
|
||||
|
||||
|
||||
@pytest.mark.parametrize("documents", [
|
||||
[],
|
||||
None,
|
||||
123,
|
||||
[1, 2, 3],
|
||||
])
|
||||
def test_invalid_rerank_req(documents):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/rerank", data={
|
||||
"query": "Machine learning is",
|
||||
"documents": documents,
|
||||
})
|
||||
assert res.status_code == 400
|
||||
assert "error" in res.body
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"query,doc1,doc2,n_tokens",
|
||||
[
|
||||
("Machine learning is", "A machine", "Learning is", 19),
|
||||
("Which city?", "Machine learning is ", "Paris, capitale de la", 26),
|
||||
]
|
||||
)
|
||||
def test_rerank_usage(query, doc1, doc2, n_tokens):
|
||||
global server
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/rerank", data={
|
||||
"query": query,
|
||||
"documents": [
|
||||
doc1,
|
||||
doc2,
|
||||
]
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens']
|
||||
assert res.body['usage']['prompt_tokens'] == n_tokens
|
||||
|
||||
|
||||
@pytest.mark.parametrize("top_n,expected_len", [
|
||||
(None, len(TEST_DOCUMENTS)), # no top_n parameter
|
||||
(2, 2),
|
||||
(4, 4),
|
||||
(99, len(TEST_DOCUMENTS)), # higher than available docs
|
||||
])
|
||||
def test_rerank_top_n(top_n, expected_len):
|
||||
global server
|
||||
server.start()
|
||||
data = {
|
||||
"query": "Machine learning is",
|
||||
"documents": TEST_DOCUMENTS,
|
||||
}
|
||||
if top_n is not None:
|
||||
data["top_n"] = top_n
|
||||
|
||||
res = server.make_request("POST", "/rerank", data=data)
|
||||
assert res.status_code == 200
|
||||
assert len(res.body["results"]) == expected_len
|
||||
|
||||
|
||||
@pytest.mark.parametrize("top_n,expected_len", [
|
||||
(None, len(TEST_DOCUMENTS)), # no top_n parameter
|
||||
(2, 2),
|
||||
(4, 4),
|
||||
(99, len(TEST_DOCUMENTS)), # higher than available docs
|
||||
])
|
||||
def test_rerank_tei_top_n(top_n, expected_len):
|
||||
global server
|
||||
server.start()
|
||||
data = {
|
||||
"query": "Machine learning is",
|
||||
"texts": TEST_DOCUMENTS,
|
||||
}
|
||||
if top_n is not None:
|
||||
data["top_n"] = top_n
|
||||
|
||||
res = server.make_request("POST", "/rerank", data=data)
|
||||
assert res.status_code == 200
|
||||
assert len(res.body) == expected_len
|
||||
194
tools/server/tests/unit/test_router.py
Normal file
@@ -0,0 +1,194 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
|
||||
server: ServerProcess
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.router()
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"model,success",
|
||||
[
|
||||
("ggml-org/tinygemma3-GGUF:Q8_0", True),
|
||||
("non-existent/model", False),
|
||||
]
|
||||
)
|
||||
def test_router_chat_completion_stream(model: str, success: bool):
|
||||
global server
|
||||
server.start()
|
||||
content = ""
|
||||
ex: ServerError | None = None
|
||||
try:
|
||||
res = server.make_stream_request("POST", "/chat/completions", data={
|
||||
"model": model,
|
||||
"max_tokens": 16,
|
||||
"messages": [
|
||||
{"role": "user", "content": "hello"},
|
||||
],
|
||||
"stream": True,
|
||||
})
|
||||
for data in res:
|
||||
if data["choices"]:
|
||||
choice = data["choices"][0]
|
||||
if choice["finish_reason"] in ["stop", "length"]:
|
||||
assert "content" not in choice["delta"]
|
||||
else:
|
||||
assert choice["finish_reason"] is None
|
||||
content += choice["delta"]["content"] or ''
|
||||
except ServerError as e:
|
||||
ex = e
|
||||
|
||||
if success:
|
||||
assert ex is None
|
||||
assert len(content) > 0
|
||||
else:
|
||||
assert ex is not None
|
||||
assert content == ""
|
||||
|
||||
|
||||
def _get_model_status(model_id: str) -> str:
|
||||
res = server.make_request("GET", "/models")
|
||||
assert res.status_code == 200
|
||||
for item in res.body.get("data", []):
|
||||
if item.get("id") == model_id or item.get("model") == model_id:
|
||||
return item["status"]["value"]
|
||||
raise AssertionError(f"Model {model_id} not found in /models response")
|
||||
|
||||
|
||||
def _wait_for_model_status(model_id: str, desired: set[str], timeout: int = 60) -> str:
|
||||
deadline = time.time() + timeout
|
||||
last_status = None
|
||||
while time.time() < deadline:
|
||||
last_status = _get_model_status(model_id)
|
||||
if last_status in desired:
|
||||
return last_status
|
||||
time.sleep(1)
|
||||
raise AssertionError(
|
||||
f"Timed out waiting for {model_id} to reach {desired}, last status: {last_status}"
|
||||
)
|
||||
|
||||
|
||||
def _load_model_and_wait(
|
||||
model_id: str, timeout: int = 60, headers: dict | None = None
|
||||
) -> None:
|
||||
load_res = server.make_request(
|
||||
"POST", "/models/load", data={"model": model_id}, headers=headers
|
||||
)
|
||||
assert load_res.status_code == 200
|
||||
assert isinstance(load_res.body, dict)
|
||||
assert load_res.body.get("success") is True
|
||||
_wait_for_model_status(model_id, {"loaded"}, timeout=timeout)
|
||||
|
||||
|
||||
def test_router_unload_model():
|
||||
global server
|
||||
server.start()
|
||||
model_id = "ggml-org/tinygemma3-GGUF:Q8_0"
|
||||
|
||||
_load_model_and_wait(model_id)
|
||||
|
||||
unload_res = server.make_request("POST", "/models/unload", data={"model": model_id})
|
||||
assert unload_res.status_code == 200
|
||||
assert unload_res.body.get("success") is True
|
||||
_wait_for_model_status(model_id, {"unloaded"})
|
||||
|
||||
|
||||
def test_router_models_max_evicts_lru():
|
||||
global server
|
||||
server.models_max = 2
|
||||
server.start()
|
||||
|
||||
candidate_models = [
|
||||
"ggml-org/tinygemma3-GGUF:Q8_0",
|
||||
"ggml-org/test-model-stories260K",
|
||||
"ggml-org/test-model-stories260K-infill",
|
||||
]
|
||||
|
||||
# Load only the first 2 models to fill the cache
|
||||
first, second, third = candidate_models[:3]
|
||||
|
||||
_load_model_and_wait(first, timeout=120)
|
||||
_load_model_and_wait(second, timeout=120)
|
||||
|
||||
# Verify both models are loaded
|
||||
assert _get_model_status(first) == "loaded"
|
||||
assert _get_model_status(second) == "loaded"
|
||||
|
||||
# Load the third model - this should trigger LRU eviction of the first model
|
||||
_load_model_and_wait(third, timeout=120)
|
||||
|
||||
# Verify eviction: third is loaded, first was evicted
|
||||
assert _get_model_status(third) == "loaded"
|
||||
assert _get_model_status(first) == "unloaded"
|
||||
|
||||
|
||||
def test_router_no_models_autoload():
|
||||
global server
|
||||
server.no_models_autoload = True
|
||||
server.start()
|
||||
model_id = "ggml-org/tinygemma3-GGUF:Q8_0"
|
||||
|
||||
res = server.make_request(
|
||||
"POST",
|
||||
"/v1/chat/completions",
|
||||
data={
|
||||
"model": model_id,
|
||||
"messages": [{"role": "user", "content": "hello"}],
|
||||
"max_tokens": 4,
|
||||
},
|
||||
)
|
||||
assert res.status_code == 400
|
||||
assert "error" in res.body
|
||||
|
||||
_load_model_and_wait(model_id)
|
||||
|
||||
success_res = server.make_request(
|
||||
"POST",
|
||||
"/v1/chat/completions",
|
||||
data={
|
||||
"model": model_id,
|
||||
"messages": [{"role": "user", "content": "hello"}],
|
||||
"max_tokens": 4,
|
||||
},
|
||||
)
|
||||
assert success_res.status_code == 200
|
||||
assert "error" not in success_res.body
|
||||
|
||||
|
||||
def test_router_api_key_required():
|
||||
global server
|
||||
server.api_key = "sk-router-secret"
|
||||
server.start()
|
||||
|
||||
model_id = "ggml-org/tinygemma3-GGUF:Q8_0"
|
||||
auth_headers = {"Authorization": f"Bearer {server.api_key}"}
|
||||
|
||||
res = server.make_request(
|
||||
"POST",
|
||||
"/v1/chat/completions",
|
||||
data={
|
||||
"model": model_id,
|
||||
"messages": [{"role": "user", "content": "hello"}],
|
||||
"max_tokens": 4,
|
||||
},
|
||||
)
|
||||
assert res.status_code == 401
|
||||
assert res.body.get("error", {}).get("type") == "authentication_error"
|
||||
|
||||
_load_model_and_wait(model_id, headers=auth_headers)
|
||||
|
||||
authed = server.make_request(
|
||||
"POST",
|
||||
"/v1/chat/completions",
|
||||
headers=auth_headers,
|
||||
data={
|
||||
"model": model_id,
|
||||
"messages": [{"role": "user", "content": "hello"}],
|
||||
"max_tokens": 4,
|
||||
},
|
||||
)
|
||||
assert authed.status_code == 200
|
||||
assert "error" not in authed.body
|
||||
127
tools/server/tests/unit/test_security.py
Normal file
@@ -0,0 +1,127 @@
|
||||
import pytest
|
||||
from openai import OpenAI
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
TEST_API_KEY = "sk-this-is-the-secret-key"
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
server.api_key = TEST_API_KEY
|
||||
|
||||
|
||||
@pytest.mark.parametrize("endpoint", ["/health", "/models"])
|
||||
def test_access_public_endpoint(endpoint: str):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("GET", endpoint)
|
||||
assert res.status_code == 200
|
||||
assert "error" not in res.body
|
||||
|
||||
|
||||
@pytest.mark.parametrize("api_key", [None, "invalid-key"])
|
||||
def test_incorrect_api_key(api_key: str):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completions", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
}, headers={
|
||||
"Authorization": f"Bearer {api_key}" if api_key else None,
|
||||
})
|
||||
assert res.status_code == 401
|
||||
assert "error" in res.body
|
||||
assert res.body["error"]["type"] == "authentication_error"
|
||||
|
||||
|
||||
def test_correct_api_key():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completions", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
}, headers={
|
||||
"Authorization": f"Bearer {TEST_API_KEY}",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert "error" not in res.body
|
||||
assert "content" in res.body
|
||||
|
||||
|
||||
def test_correct_api_key_anthropic_header():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completions", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
}, headers={
|
||||
"X-Api-Key": TEST_API_KEY,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert "error" not in res.body
|
||||
assert "content" in res.body
|
||||
|
||||
|
||||
def test_openai_library_correct_api_key():
|
||||
global server
|
||||
server.start()
|
||||
client = OpenAI(api_key=TEST_API_KEY, base_url=f"http://{server.server_host}:{server.server_port}")
|
||||
res = client.chat.completions.create(
|
||||
model="gpt-3.5-turbo",
|
||||
messages=[
|
||||
{"role": "system", "content": "You are a chatbot."},
|
||||
{"role": "user", "content": "What is the meaning of life?"},
|
||||
],
|
||||
)
|
||||
assert len(res.choices) == 1
|
||||
|
||||
|
||||
@pytest.mark.parametrize("origin,cors_header,cors_header_value", [
|
||||
("localhost", "Access-Control-Allow-Origin", "localhost"),
|
||||
("web.mydomain.fr", "Access-Control-Allow-Origin", "web.mydomain.fr"),
|
||||
("origin", "Access-Control-Allow-Credentials", "true"),
|
||||
("web.mydomain.fr", "Access-Control-Allow-Methods", "GET, POST"),
|
||||
("web.mydomain.fr", "Access-Control-Allow-Headers", "*"),
|
||||
])
|
||||
def test_cors_options(origin: str, cors_header: str, cors_header_value: str):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("OPTIONS", "/completions", headers={
|
||||
"Origin": origin,
|
||||
"Access-Control-Request-Method": "POST",
|
||||
"Access-Control-Request-Headers": "Authorization",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert cors_header in res.headers
|
||||
assert res.headers[cors_header] == cors_header_value
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"media_path, image_url, success",
|
||||
[
|
||||
(None, "file://mtmd/test-1.jpeg", False), # disabled media path, should fail
|
||||
("../../../tools", "file://mtmd/test-1.jpeg", True),
|
||||
("../../../tools", "file:////mtmd//test-1.jpeg", True), # should be the same file as above
|
||||
("../../../tools", "file://mtmd/notfound.jpeg", False), # non-existent file
|
||||
("../../../tools", "file://../mtmd/test-1.jpeg", False), # no directory traversal
|
||||
]
|
||||
)
|
||||
def test_local_media_file(media_path, image_url, success,):
|
||||
server = ServerPreset.tinygemma3()
|
||||
server.media_path = media_path
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"max_tokens": 1,
|
||||
"messages": [
|
||||
{"role": "user", "content": [
|
||||
{"type": "text", "text": "test"},
|
||||
{"type": "image_url", "image_url": {
|
||||
"url": image_url,
|
||||
}},
|
||||
]},
|
||||
],
|
||||
})
|
||||
if success:
|
||||
assert res.status_code == 200
|
||||
else:
|
||||
assert res.status_code == 400
|
||||
39
tools/server/tests/unit/test_sleep.py
Normal file
@@ -0,0 +1,39 @@
|
||||
import pytest
|
||||
import time
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
|
||||
def test_server_sleep():
|
||||
global server
|
||||
server.sleep_idle_seconds = 1
|
||||
server.start()
|
||||
|
||||
# wait a bit so that server can go to sleep
|
||||
time.sleep(2)
|
||||
|
||||
# make sure these endpoints are still responsive after sleep
|
||||
res = server.make_request("GET", "/health")
|
||||
assert res.status_code == 200
|
||||
res = server.make_request("GET", "/props")
|
||||
assert res.status_code == 200
|
||||
assert res.body["is_sleeping"] == True
|
||||
|
||||
# make a generation request to wake up the server
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"n_predict": 1,
|
||||
"prompt": "Hello",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
|
||||
# it should no longer be sleeping
|
||||
res = server.make_request("GET", "/props")
|
||||
assert res.status_code == 200
|
||||
assert res.body["is_sleeping"] == False
|
||||
98
tools/server/tests/unit/test_slot_save.py
Normal file
@@ -0,0 +1,98 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
server.slot_save_path = "./tmp"
|
||||
server.temperature = 0.0
|
||||
|
||||
|
||||
def test_slot_save_restore():
|
||||
global server
|
||||
server.start()
|
||||
|
||||
# First prompt in slot 1 should be fully processed
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "What is the capital of France?",
|
||||
"id_slot": 1,
|
||||
"cache_prompt": True,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(Whiskers|Flana)+", res.body["content"])
|
||||
assert res.body["timings"]["prompt_n"] == 21 # all tokens are processed
|
||||
|
||||
# Save state of slot 1
|
||||
res = server.make_request("POST", "/slots/1?action=save", data={
|
||||
"filename": "slot1.bin",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert res.body["n_saved"] == 84
|
||||
|
||||
# Since we have cache, this should only process the last tokens
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "What is the capital of Germany?",
|
||||
"id_slot": 1,
|
||||
"cache_prompt": True,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(Jack|said)+", res.body["content"])
|
||||
assert res.body["timings"]["prompt_n"] == 6 # only different part is processed
|
||||
|
||||
# Loading the saved cache into slot 0
|
||||
res = server.make_request("POST", "/slots/0?action=restore", data={
|
||||
"filename": "slot1.bin",
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert res.body["n_restored"] == 84
|
||||
|
||||
# Since we have cache, slot 0 should only process the last tokens
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "What is the capital of Germany?",
|
||||
"id_slot": 0,
|
||||
"cache_prompt": True,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(Jack|said)+", res.body["content"])
|
||||
assert res.body["timings"]["prompt_n"] == 6 # only different part is processed
|
||||
|
||||
# For verification that slot 1 was not corrupted during slot 0 load, same thing should work
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "What is the capital of Germany?",
|
||||
"id_slot": 1,
|
||||
"cache_prompt": True,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(Jack|said)+", res.body["content"])
|
||||
assert res.body["timings"]["prompt_n"] == 1
|
||||
|
||||
|
||||
def test_slot_erase():
|
||||
global server
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "What is the capital of France?",
|
||||
"id_slot": 1,
|
||||
"cache_prompt": True,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(Whiskers|Flana)+", res.body["content"])
|
||||
assert res.body["timings"]["prompt_n"] == 21 # all tokens are processed
|
||||
|
||||
# erase slot 1
|
||||
res = server.make_request("POST", "/slots/1?action=erase")
|
||||
assert res.status_code == 200
|
||||
|
||||
# re-run the same prompt, it should process all tokens again
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "What is the capital of France?",
|
||||
"id_slot": 1,
|
||||
"cache_prompt": True,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(Whiskers|Flana)+", res.body["content"])
|
||||
assert res.body["timings"]["prompt_n"] == 21 # all tokens are processed
|
||||
128
tools/server/tests/unit/test_speculative.py
Normal file
@@ -0,0 +1,128 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
|
||||
# We use a F16 MOE gguf as main model, and q4_0 as draft model
|
||||
|
||||
server = ServerPreset.stories15m_moe()
|
||||
|
||||
MODEL_DRAFT_FILE_URL = "https://huggingface.co/ggml-org/models/resolve/main/tinyllamas/stories15M-q4_0.gguf"
|
||||
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.stories15m_moe()
|
||||
# set default values
|
||||
server.model_draft = download_file(MODEL_DRAFT_FILE_URL)
|
||||
server.draft_min = 4
|
||||
server.draft_max = 8
|
||||
server.fa = "off"
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def fixture_create_server():
|
||||
return create_server()
|
||||
|
||||
|
||||
def test_with_and_without_draft():
|
||||
global server
|
||||
server.model_draft = None # disable draft model
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"temperature": 0.0,
|
||||
"top_k": 1,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
content_no_draft = res.body["content"]
|
||||
server.stop()
|
||||
|
||||
# create new server with draft model
|
||||
create_server()
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"temperature": 0.0,
|
||||
"top_k": 1,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
content_draft = res.body["content"]
|
||||
|
||||
assert content_no_draft == content_draft
|
||||
|
||||
|
||||
def test_different_draft_min_draft_max():
|
||||
global server
|
||||
test_values = [
|
||||
(1, 2),
|
||||
(1, 4),
|
||||
(4, 8),
|
||||
(4, 12),
|
||||
(8, 16),
|
||||
]
|
||||
last_content = None
|
||||
for draft_min, draft_max in test_values:
|
||||
server.stop()
|
||||
server.draft_min = draft_min
|
||||
server.draft_max = draft_max
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"temperature": 0.0,
|
||||
"top_k": 1,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
if last_content is not None:
|
||||
assert last_content == res.body["content"]
|
||||
last_content = res.body["content"]
|
||||
|
||||
|
||||
def test_slot_ctx_not_exceeded():
|
||||
global server
|
||||
server.n_ctx = 256
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "Hello " * 248,
|
||||
"temperature": 0.0,
|
||||
"top_k": 1,
|
||||
"speculative.p_min": 0.0,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert len(res.body["content"]) > 0
|
||||
|
||||
|
||||
def test_with_ctx_shift():
|
||||
global server
|
||||
server.n_ctx = 256
|
||||
server.enable_ctx_shift = True
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completion", data={
|
||||
"prompt": "Hello " * 248,
|
||||
"temperature": 0.0,
|
||||
"top_k": 1,
|
||||
"n_predict": 256,
|
||||
"speculative.p_min": 0.0,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
assert len(res.body["content"]) > 0
|
||||
assert res.body["tokens_predicted"] == 256
|
||||
assert res.body["truncated"] == True
|
||||
|
||||
|
||||
@pytest.mark.parametrize("n_slots,n_requests", [
|
||||
(1, 2),
|
||||
(2, 2),
|
||||
])
|
||||
def test_multi_requests_parallel(n_slots: int, n_requests: int):
|
||||
global server
|
||||
server.n_slots = n_slots
|
||||
server.start()
|
||||
tasks = []
|
||||
for _ in range(n_requests):
|
||||
tasks.append((server.make_request, ("POST", "/completion", {
|
||||
"prompt": "I believe the meaning of life is",
|
||||
"temperature": 0.0,
|
||||
"top_k": 1,
|
||||
})))
|
||||
results = parallel_function_calls(tasks)
|
||||
for res in results:
|
||||
assert res.status_code == 200
|
||||
assert match_regex("(wise|kind|owl|answer)+", res.body["content"])
|
||||
105
tools/server/tests/unit/test_template.py
Normal file
@@ -0,0 +1,105 @@
|
||||
#!/usr/bin/env python
|
||||
import pytest
|
||||
|
||||
# ensure grandparent path is in sys.path
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
from unit.test_tool_call import TEST_TOOL
|
||||
path = Path(__file__).resolve().parents[1]
|
||||
sys.path.insert(0, str(path))
|
||||
|
||||
import datetime
|
||||
from utils import *
|
||||
|
||||
server: ServerProcess
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
server.model_alias = "tinyllama-2"
|
||||
server.n_slots = 1
|
||||
|
||||
|
||||
@pytest.mark.parametrize("tools", [None, [], [TEST_TOOL]])
|
||||
@pytest.mark.parametrize("template_name,reasoning_budget,expected_end", [
|
||||
("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", None, "<think>\n"),
|
||||
("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", -1, "<think>\n"),
|
||||
("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", 0, "<think>\n</think>"),
|
||||
|
||||
("Qwen-Qwen3-0.6B", -1, "<|im_start|>assistant\n"),
|
||||
("Qwen-Qwen3-0.6B", 0, "<|im_start|>assistant\n<think>\n\n</think>\n\n"),
|
||||
|
||||
("Qwen-QwQ-32B", -1, "<|im_start|>assistant\n<think>\n"),
|
||||
("Qwen-QwQ-32B", 0, "<|im_start|>assistant\n<think>\n</think>"),
|
||||
|
||||
("CohereForAI-c4ai-command-r7b-12-2024-tool_use", -1, "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"),
|
||||
("CohereForAI-c4ai-command-r7b-12-2024-tool_use", 0, "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|><|END_THINKING|>"),
|
||||
])
|
||||
def test_reasoning_budget(template_name: str, reasoning_budget: int | None, expected_end: str, tools: list[dict]):
|
||||
global server
|
||||
server.jinja = True
|
||||
server.reasoning_budget = reasoning_budget
|
||||
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/apply-template", data={
|
||||
"messages": [
|
||||
{"role": "user", "content": "What is today?"},
|
||||
],
|
||||
"tools": tools,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
prompt = res.body["prompt"]
|
||||
|
||||
assert prompt.endswith(expected_end), f"Expected prompt to end with '{expected_end}', got '{prompt}'"
|
||||
|
||||
|
||||
@pytest.mark.parametrize("tools", [None, [], [TEST_TOOL]])
|
||||
@pytest.mark.parametrize("template_name,format", [
|
||||
("meta-llama-Llama-3.3-70B-Instruct", "%d %b %Y"),
|
||||
("fireworks-ai-llama-3-firefunction-v2", "%b %d %Y"),
|
||||
])
|
||||
def test_date_inside_prompt(template_name: str, format: str, tools: list[dict]):
|
||||
global server
|
||||
server.jinja = True
|
||||
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/apply-template", data={
|
||||
"messages": [
|
||||
{"role": "user", "content": "What is today?"},
|
||||
],
|
||||
"tools": tools,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
prompt = res.body["prompt"]
|
||||
|
||||
today_str = datetime.date.today().strftime(format)
|
||||
assert today_str in prompt, f"Expected today's date ({today_str}) in content ({prompt})"
|
||||
|
||||
|
||||
@pytest.mark.parametrize("add_generation_prompt", [False, True])
|
||||
@pytest.mark.parametrize("template_name,expected_generation_prompt", [
|
||||
("meta-llama-Llama-3.3-70B-Instruct", "<|start_header_id|>assistant<|end_header_id|>"),
|
||||
])
|
||||
def test_add_generation_prompt(template_name: str, expected_generation_prompt: str, add_generation_prompt: bool):
|
||||
global server
|
||||
server.jinja = True
|
||||
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
|
||||
server.start()
|
||||
|
||||
res = server.make_request("POST", "/apply-template", data={
|
||||
"messages": [
|
||||
{"role": "user", "content": "What is today?"},
|
||||
],
|
||||
"add_generation_prompt": add_generation_prompt,
|
||||
})
|
||||
assert res.status_code == 200
|
||||
prompt = res.body["prompt"]
|
||||
|
||||
if add_generation_prompt:
|
||||
assert expected_generation_prompt in prompt, f"Expected generation prompt ({expected_generation_prompt}) in content ({prompt})"
|
||||
else:
|
||||
assert expected_generation_prompt not in prompt, f"Did not expect generation prompt ({expected_generation_prompt}) in content ({prompt})"
|
||||
59
tools/server/tests/unit/test_tokenize.py
Normal file
@@ -0,0 +1,59 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
|
||||
|
||||
def test_tokenize_detokenize():
|
||||
global server
|
||||
server.start()
|
||||
# tokenize
|
||||
content = "What is the capital of France ?"
|
||||
res_tok = server.make_request("POST", "/tokenize", data={
|
||||
"content": content
|
||||
})
|
||||
assert res_tok.status_code == 200
|
||||
assert len(res_tok.body["tokens"]) > 5
|
||||
# detokenize
|
||||
res_detok = server.make_request("POST", "/detokenize", data={
|
||||
"tokens": res_tok.body["tokens"],
|
||||
})
|
||||
assert res_detok.status_code == 200
|
||||
assert res_detok.body["content"].strip() == content
|
||||
|
||||
|
||||
def test_tokenize_with_bos():
|
||||
global server
|
||||
server.start()
|
||||
# tokenize
|
||||
content = "What is the capital of France ?"
|
||||
bosId = 1
|
||||
res_tok = server.make_request("POST", "/tokenize", data={
|
||||
"content": content,
|
||||
"add_special": True,
|
||||
})
|
||||
assert res_tok.status_code == 200
|
||||
assert res_tok.body["tokens"][0] == bosId
|
||||
|
||||
|
||||
def test_tokenize_with_pieces():
|
||||
global server
|
||||
server.start()
|
||||
# tokenize
|
||||
content = "This is a test string with unicode 媽 and emoji 🤗"
|
||||
res_tok = server.make_request("POST", "/tokenize", data={
|
||||
"content": content,
|
||||
"with_pieces": True,
|
||||
})
|
||||
assert res_tok.status_code == 200
|
||||
for token in res_tok.body["tokens"]:
|
||||
assert "id" in token
|
||||
assert token["id"] > 0
|
||||
assert "piece" in token
|
||||
assert len(token["piece"]) > 0
|
||||
625
tools/server/tests/unit/test_tool_call.py
Executable file
@@ -0,0 +1,625 @@
|
||||
#!/usr/bin/env python
|
||||
import pytest
|
||||
|
||||
# ensure grandparent path is in sys.path
|
||||
from pathlib import Path
|
||||
import sys
|
||||
path = Path(__file__).resolve().parents[1]
|
||||
sys.path.insert(0, str(path))
|
||||
|
||||
from utils import *
|
||||
from enum import Enum
|
||||
|
||||
server: ServerProcess
|
||||
|
||||
TIMEOUT_START_SLOW = 15 * 60 # this is needed for real model tests
|
||||
TIMEOUT_HTTP_REQUEST = 60
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinyllama2()
|
||||
server.model_alias = "tinyllama-2-tool-call"
|
||||
server.server_port = 8081
|
||||
server.n_slots = 1
|
||||
server.n_ctx = 8192
|
||||
server.n_batch = 2048
|
||||
|
||||
class CompletionMode(Enum):
|
||||
NORMAL = "normal"
|
||||
STREAMED = "streamed"
|
||||
|
||||
TEST_TOOL = {
|
||||
"type":"function",
|
||||
"function": {
|
||||
"name": "test",
|
||||
"description": "",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"success": {"type": "boolean", "const": True},
|
||||
},
|
||||
"required": ["success"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PYTHON_TOOL = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "python",
|
||||
"description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"description": "The code to run in the ipython interpreter."
|
||||
}
|
||||
},
|
||||
"required": ["code"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
WEATHER_TOOL = {
|
||||
"type":"function",
|
||||
"function":{
|
||||
"name":"get_current_weather",
|
||||
"description":"Get the current weather in a given location",
|
||||
"parameters":{
|
||||
"type":"object",
|
||||
"properties":{
|
||||
"location":{
|
||||
"type":"string",
|
||||
"description":"The city and country/state, e.g. 'San Francisco, CA', or 'Paris, France'"
|
||||
}
|
||||
},
|
||||
"required":["location"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def do_test_completion_with_required_tool_tiny(server: ServerProcess, tool: dict, argument_key: str | None, n_predict, **kwargs):
|
||||
body = server.make_any_request("POST", "/v1/chat/completions", data={
|
||||
"max_tokens": n_predict,
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a coding assistant."},
|
||||
{"role": "user", "content": "Write an example"},
|
||||
],
|
||||
"tool_choice": "required",
|
||||
"tools": [tool],
|
||||
"parallel_tool_calls": False,
|
||||
**kwargs,
|
||||
})
|
||||
# assert res.status_code == 200, f"Expected status code 200, got {res.status_code}"
|
||||
choice = body["choices"][0]
|
||||
tool_calls = choice["message"].get("tool_calls")
|
||||
assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}'
|
||||
tool_call = tool_calls[0]
|
||||
assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}'
|
||||
# assert len(tool_call.get("id", "")) > 0, f'Expected non empty tool call id in {tool_call}'
|
||||
expected_function_name = "python" if tool["type"] == "code_interpreter" else tool["function"]["name"]
|
||||
assert expected_function_name == tool_call["function"]["name"]
|
||||
actual_arguments = tool_call["function"]["arguments"]
|
||||
assert isinstance(actual_arguments, str)
|
||||
if argument_key is not None:
|
||||
actual_arguments = json.loads(actual_arguments)
|
||||
assert argument_key in actual_arguments, f"tool arguments: {json.dumps(actual_arguments)}, expected: {argument_key}"
|
||||
|
||||
|
||||
@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED])
|
||||
@pytest.mark.parametrize("template_name,tool,argument_key", [
|
||||
("google-gemma-2-2b-it", TEST_TOOL, "success"),
|
||||
("google-gemma-2-2b-it", TEST_TOOL, "success"),
|
||||
("meta-llama-Llama-3.3-70B-Instruct", TEST_TOOL, "success"),
|
||||
("meta-llama-Llama-3.3-70B-Instruct", TEST_TOOL, "success"),
|
||||
("meta-llama-Llama-3.3-70B-Instruct", PYTHON_TOOL, "code"),
|
||||
("meta-llama-Llama-3.3-70B-Instruct", PYTHON_TOOL, "code"),
|
||||
])
|
||||
def test_completion_with_required_tool_tiny_fast(template_name: str, tool: dict, argument_key: str | None, stream: CompletionMode):
|
||||
global server
|
||||
n_predict = 1024
|
||||
# server = ServerPreset.stories15m_moe()
|
||||
server.jinja = True
|
||||
server.n_predict = n_predict
|
||||
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
|
||||
server.start()
|
||||
do_test_completion_with_required_tool_tiny(server, tool, argument_key, n_predict, stream=stream == CompletionMode.STREAMED, temperature=0.0, top_k=1, top_p=1.0)
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED])
|
||||
@pytest.mark.parametrize("template_name,tool,argument_key", [
|
||||
("meta-llama-Llama-3.1-8B-Instruct", TEST_TOOL, "success"),
|
||||
("meta-llama-Llama-3.1-8B-Instruct", PYTHON_TOOL, "code"),
|
||||
|
||||
("meetkai-functionary-medium-v3.1", TEST_TOOL, "success"),
|
||||
("meetkai-functionary-medium-v3.1", PYTHON_TOOL, "code"),
|
||||
|
||||
("meetkai-functionary-medium-v3.2", TEST_TOOL, "success"),
|
||||
# Functionary v3.2 format supports raw python content, which w/ a dummy stories model will never end on its own.
|
||||
# ("meetkai-functionary-medium-v3.2", PYTHON_TOOL, "code"),
|
||||
|
||||
("NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use", TEST_TOOL, "success"),
|
||||
("NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use", PYTHON_TOOL, "code"),
|
||||
|
||||
("meta-llama-Llama-3.2-3B-Instruct", TEST_TOOL, "success"),
|
||||
("meta-llama-Llama-3.2-3B-Instruct", PYTHON_TOOL, "code"),
|
||||
|
||||
("mistralai-Mistral-Nemo-Instruct-2407", TEST_TOOL, "success"),
|
||||
("mistralai-Mistral-Nemo-Instruct-2407", PYTHON_TOOL, "code"),
|
||||
|
||||
("NousResearch-Hermes-3-Llama-3.1-8B-tool_use", TEST_TOOL, "success"),
|
||||
("NousResearch-Hermes-3-Llama-3.1-8B-tool_use", PYTHON_TOOL, "code"),
|
||||
|
||||
("deepseek-ai-DeepSeek-R1-Distill-Llama-8B", TEST_TOOL, "success"),
|
||||
("deepseek-ai-DeepSeek-R1-Distill-Llama-8B", PYTHON_TOOL, "code"),
|
||||
|
||||
("fireworks-ai-llama-3-firefunction-v2", TEST_TOOL, "success"),
|
||||
# ("fireworks-ai-llama-3-firefunction-v2", PYTHON_TOOL, "codeFalse), True),
|
||||
# ("fireworks-ai-llama-3-firefunction-v2", PYTHON_TOOL, "code"),
|
||||
|
||||
])
|
||||
def test_completion_with_required_tool_tiny_slow(template_name: str, tool: dict, argument_key: str | None, stream: CompletionMode):
|
||||
global server
|
||||
n_predict = 512
|
||||
# server = ServerPreset.stories15m_moe()
|
||||
server.jinja = True
|
||||
server.n_predict = n_predict
|
||||
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
|
||||
server.start(timeout_seconds=TIMEOUT_START_SLOW)
|
||||
do_test_completion_with_required_tool_tiny(server, tool, argument_key, n_predict, stream=stream == CompletionMode.STREAMED)
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED])
|
||||
@pytest.mark.parametrize("tool,argument_key,hf_repo,template_override", [
|
||||
(TEST_TOOL, "success", "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
|
||||
(PYTHON_TOOL, "code", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
|
||||
(PYTHON_TOOL, "code", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")),
|
||||
(PYTHON_TOOL, "code", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")),
|
||||
(PYTHON_TOOL, "code", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
# (TEST_TOOL, "success", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
|
||||
# (PYTHON_TOOL, "code", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
|
||||
# (PYTHON_TOOL, "code", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/functionary-small-v3.2-GGUF:Q4_K_M", ("meetkai/functionary-medium-v3.2", None)),
|
||||
(PYTHON_TOOL, "code", "bartowski/functionary-small-v3.2-GGUF:Q4_K_M", ("meetkai/functionary-medium-v3.2", None)),
|
||||
(PYTHON_TOOL, "code", "bartowski/functionary-small-v3.2-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
|
||||
(PYTHON_TOOL, "code", "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
|
||||
(PYTHON_TOOL, "code", "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
|
||||
(PYTHON_TOOL, "code", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
|
||||
(PYTHON_TOOL, "code", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
(TEST_TOOL, "success", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
|
||||
(PYTHON_TOOL, "code", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
|
||||
])
|
||||
def test_completion_with_required_tool_real_model(tool: dict, argument_key: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode):
|
||||
global server
|
||||
n_predict = 512
|
||||
server.jinja = True
|
||||
server.n_ctx = 8192
|
||||
server.n_predict = n_predict
|
||||
server.model_hf_repo = hf_repo
|
||||
server.model_hf_file = None
|
||||
if isinstance(template_override, tuple):
|
||||
(template_hf_repo, template_variant) = template_override
|
||||
server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja"
|
||||
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
|
||||
elif isinstance(template_override, str):
|
||||
server.chat_template = template_override
|
||||
server.start(timeout_seconds=TIMEOUT_START_SLOW)
|
||||
body = server.make_any_request("POST", "/v1/chat/completions", data={
|
||||
"max_tokens": n_predict,
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a coding assistant."},
|
||||
{"role": "user", "content": "Write an example"},
|
||||
],
|
||||
"tool_choice": "required",
|
||||
"tools": [tool],
|
||||
"parallel_tool_calls": False,
|
||||
"stream": stream == CompletionMode.STREAMED,
|
||||
"temperature": 0.0,
|
||||
"top_k": 1,
|
||||
"top_p": 1.0,
|
||||
}, timeout=TIMEOUT_HTTP_REQUEST)
|
||||
choice = body["choices"][0]
|
||||
tool_calls = choice["message"].get("tool_calls")
|
||||
assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}'
|
||||
tool_call = tool_calls[0]
|
||||
# assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}'
|
||||
expected_function_name = "python" if tool["type"] == "code_interpreter" else tool["function"]["name"]
|
||||
assert expected_function_name == tool_call["function"]["name"]
|
||||
actual_arguments = tool_call["function"]["arguments"]
|
||||
assert isinstance(actual_arguments, str)
|
||||
if argument_key is not None:
|
||||
actual_arguments = json.loads(actual_arguments)
|
||||
assert argument_key in actual_arguments, f"tool arguments: {json.dumps(actual_arguments)}, expected: {argument_key}"
|
||||
|
||||
|
||||
def do_test_completion_without_tool_call(server: ServerProcess, n_predict: int, tools: list[dict], tool_choice: str | None, **kwargs):
|
||||
body = server.make_any_request("POST", "/v1/chat/completions", data={
|
||||
"max_tokens": n_predict,
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a coding assistant."},
|
||||
{"role": "user", "content": "say hello world with python"},
|
||||
],
|
||||
"tools": tools if tools else None,
|
||||
"tool_choice": tool_choice,
|
||||
**kwargs,
|
||||
}, timeout=TIMEOUT_HTTP_REQUEST)
|
||||
choice = body["choices"][0]
|
||||
assert choice["message"].get("tool_calls") is None, f'Expected no tool call in {choice["message"]}'
|
||||
|
||||
|
||||
@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED])
|
||||
@pytest.mark.parametrize("template_name,n_predict,tools,tool_choice", [
|
||||
("meta-llama-Llama-3.3-70B-Instruct", 128, [], None),
|
||||
("meta-llama-Llama-3.3-70B-Instruct", 128, [TEST_TOOL], None),
|
||||
("meta-llama-Llama-3.3-70B-Instruct", 128, [PYTHON_TOOL], 'none'),
|
||||
])
|
||||
def test_completion_without_tool_call_fast(template_name: str, n_predict: int, tools: list[dict], tool_choice: str | None, stream: CompletionMode):
|
||||
global server
|
||||
server.n_predict = n_predict
|
||||
server.jinja = True
|
||||
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
|
||||
server.start()
|
||||
do_test_completion_without_tool_call(server, n_predict, tools, tool_choice, stream=stream == CompletionMode.STREAMED)
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED])
|
||||
@pytest.mark.parametrize("template_name,n_predict,tools,tool_choice", [
|
||||
("meetkai-functionary-medium-v3.2", 256, [], None),
|
||||
("meetkai-functionary-medium-v3.2", 256, [TEST_TOOL], None),
|
||||
("meetkai-functionary-medium-v3.2", 256, [PYTHON_TOOL], 'none'),
|
||||
("meetkai-functionary-medium-v3.1", 256, [], None),
|
||||
("meetkai-functionary-medium-v3.1", 256, [TEST_TOOL], None),
|
||||
("meetkai-functionary-medium-v3.1", 256, [PYTHON_TOOL], 'none'),
|
||||
("meta-llama-Llama-3.2-3B-Instruct", 256, [], None),
|
||||
("meta-llama-Llama-3.2-3B-Instruct", 256, [TEST_TOOL], None),
|
||||
("meta-llama-Llama-3.2-3B-Instruct", 256, [PYTHON_TOOL], 'none'),
|
||||
])
|
||||
def test_completion_without_tool_call_slow(template_name: str, n_predict: int, tools: list[dict], tool_choice: str | None, stream: CompletionMode):
|
||||
global server
|
||||
server.n_predict = n_predict
|
||||
server.jinja = True
|
||||
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
|
||||
server.start(timeout_seconds=TIMEOUT_START_SLOW)
|
||||
do_test_completion_without_tool_call(server, n_predict, tools, tool_choice, stream=stream == CompletionMode.STREAMED)
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED])
|
||||
@pytest.mark.parametrize("hf_repo,template_override", [
|
||||
("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
|
||||
("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
|
||||
("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", None),
|
||||
("bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", None),
|
||||
("bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None),
|
||||
("bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
|
||||
("bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")),
|
||||
("bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
# ("bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
|
||||
# ("bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
# ("bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)),
|
||||
# ("bartowski/functionary-small-v3.2-GGUF:Q8_0", "chatml"),
|
||||
|
||||
("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
|
||||
("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/c4ai-command-r7b-12-2024-GGUF:Q6_K_L", ("CohereForAI/c4ai-command-r7b-12-2024", "tool_use")),
|
||||
|
||||
("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
|
||||
|
||||
# Note: gemma-2-2b-it knows itself as "model", not "assistant", so we don't test the ill-suited chatml on it.
|
||||
("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None),
|
||||
|
||||
# ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
|
||||
])
|
||||
def test_weather(hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode):
|
||||
global server
|
||||
n_predict = 512
|
||||
server.jinja = True
|
||||
server.n_ctx = 8192
|
||||
server.n_predict = n_predict
|
||||
server.model_hf_repo = hf_repo
|
||||
server.model_hf_file = None
|
||||
if isinstance(template_override, tuple):
|
||||
(template_hf_repo, template_variant) = template_override
|
||||
server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja"
|
||||
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
|
||||
elif isinstance(template_override, str):
|
||||
server.chat_template = template_override
|
||||
server.start()
|
||||
do_test_weather(server, stream=stream == CompletionMode.STREAMED, max_tokens=n_predict)
|
||||
|
||||
|
||||
def do_test_weather(server: ServerProcess, **kwargs):
|
||||
body = server.make_any_request("POST", "/v1/chat/completions", data={
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a chatbot that uses tools/functions. Dont overthink things."},
|
||||
{"role": "user", "content": "What is the weather in Istanbul?"},
|
||||
],
|
||||
"tools": [WEATHER_TOOL],
|
||||
**kwargs,
|
||||
}, timeout=TIMEOUT_HTTP_REQUEST)
|
||||
choice = body["choices"][0]
|
||||
tool_calls = choice["message"].get("tool_calls")
|
||||
assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}'
|
||||
tool_call = tool_calls[0]
|
||||
# assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}'
|
||||
assert tool_call["function"]["name"] == WEATHER_TOOL["function"]["name"], f'Expected weather tool call, got {tool_call["function"]["name"]}'
|
||||
# assert len(tool_call.get("id", "")) > 0, f'Expected non empty tool call id in {tool_call}'
|
||||
actual_arguments = json.loads(tool_call["function"]["arguments"])
|
||||
assert 'location' in actual_arguments, f"location not found in {json.dumps(actual_arguments)}"
|
||||
location = actual_arguments["location"]
|
||||
assert isinstance(location, str), f"Expected location to be a string, got {type(location)}: {json.dumps(location)}"
|
||||
assert re.match('^Istanbul(( |, ?)(TR|Turkey|Türkiye))?$', location), f'Expected Istanbul for location, got {location}'
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED])
|
||||
@pytest.mark.parametrize("result_override,n_predict,hf_repo,template_override", [
|
||||
(None, 128, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"),
|
||||
(None, 128, "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", None),
|
||||
(None, 128, "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
(None, 128, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
(None, 128, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
|
||||
(None, 128, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")),
|
||||
(None, 128, "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)),
|
||||
(None, 128, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
|
||||
(None, 128, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"),
|
||||
(None, 128, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
|
||||
("[\\s\\S]*?\\*\\*\\s*0.5($|\\*\\*)", 8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
|
||||
|
||||
# TODO: fix these (wrong results, either didn't respect decimal instruction or got wrong value)
|
||||
# (None, 128, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
|
||||
# ("[\\s\\S]*?\\*\\*\\s*0.5($|\\*\\*)", 8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
|
||||
])
|
||||
def test_calc_result(result_override: str | None, n_predict: int, hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode):
|
||||
global server
|
||||
server.jinja = True
|
||||
server.n_ctx = 8192 * 2
|
||||
server.n_predict = n_predict
|
||||
server.model_hf_repo = hf_repo
|
||||
server.model_hf_file = None
|
||||
if isinstance(template_override, tuple):
|
||||
(template_hf_repo, template_variant) = template_override
|
||||
server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja"
|
||||
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
|
||||
elif isinstance(template_override, str):
|
||||
server.chat_template = template_override
|
||||
server.start(timeout_seconds=TIMEOUT_START_SLOW)
|
||||
do_test_calc_result(server, result_override, n_predict, stream=stream == CompletionMode.STREAMED)
|
||||
|
||||
|
||||
def do_test_calc_result(server: ServerProcess, result_override: str | None, n_predict: int, **kwargs):
|
||||
body = server.make_any_request("POST", "/v1/chat/completions", data={
|
||||
"max_tokens": n_predict,
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a tools-calling assistant. You express numerical values with at most two decimals."},
|
||||
{"role": "user", "content": "What's the y coordinate of a point on the unit sphere at angle 30 degrees?"},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": None,
|
||||
"tool_calls": [
|
||||
{
|
||||
"id": "call_6789",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "calculate",
|
||||
"arguments": "{\"expression\":\"sin(30 * pi / 180)\"}"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "tool",
|
||||
"name": "calculate",
|
||||
"content": "0.55644242476",
|
||||
"tool_call_id": "call_6789"
|
||||
}
|
||||
],
|
||||
"tools": [
|
||||
{
|
||||
"type":"function",
|
||||
"function":{
|
||||
"name":"calculate",
|
||||
"description":"A calculator function that computes values of arithmetic expressions in the Python syntax",
|
||||
"parameters":{
|
||||
"type":"object",
|
||||
"properties":{
|
||||
"expression":{
|
||||
"type":"string",
|
||||
"description":"An arithmetic expression to compute the value of (Python syntad, assuming all floats)"
|
||||
}
|
||||
},
|
||||
"required":["expression"]
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
**kwargs,
|
||||
}, timeout=TIMEOUT_HTTP_REQUEST)
|
||||
choice = body["choices"][0]
|
||||
tool_calls = choice["message"].get("tool_calls")
|
||||
assert tool_calls is None, f'Expected no tool call in {choice["message"]}'
|
||||
content = choice["message"].get("content")
|
||||
assert content is not None, f'Expected content in {choice["message"]}'
|
||||
if result_override is not None:
|
||||
assert re.match(result_override, content), f'Expected {result_override}, got {content}'
|
||||
else:
|
||||
assert re.match('^[\\s\\S]*?((That\'s|\\bis) (approximately )?)?\\b0\\.(5\\b|56\\b|556)', content), \
|
||||
f'Expected something like "The y coordinate is 0.56.", got {content}'
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED])
|
||||
@pytest.mark.parametrize("n_predict,reasoning_format,expect_reasoning_content,expect_content,hf_repo,template_override", [
|
||||
(128, 'deepseek', None, "^The sum of 102 and 7 is 109[\\s\\S]*", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
|
||||
(128, None, None, "^The sum of 102 and 7 is 109[\\s\\S]*", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
|
||||
(1024, 'deepseek', "I need to calculate the sum of 102 and 7[\\s\\S]*", "To find the sum of[\\s\\S]*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
|
||||
(1024, 'deepseek', "First, I [\\s\\S]*", "To find the sum of[\\s\\S]*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
|
||||
# (1024, 'none', CompletionMode.NORMAL, None, "^(<think>\\s*)?I need[\\s\\S]*?</think>\\s*To find[\\s\\S]*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
|
||||
# (128, 'deepseek', None, "^Okay, let me figure out the sum of 102 and 7[\\s\\S]*", "bartowski/Qwen_QwQ-32B-GGUF:Q4_K_M", None),
|
||||
])
|
||||
def test_thoughts(n_predict: int, reasoning_format: Literal['deepseek', 'none'] | None, expect_content: str | None, expect_reasoning_content: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode):
|
||||
global server
|
||||
server.reasoning_format = reasoning_format
|
||||
server.jinja = True
|
||||
server.n_ctx = 8192 * 2
|
||||
server.n_predict = n_predict
|
||||
server.model_hf_repo = hf_repo
|
||||
server.model_hf_file = None
|
||||
if isinstance(template_override, tuple):
|
||||
(template_hf_repo, template_variant) = template_override
|
||||
server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja"
|
||||
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
|
||||
elif isinstance(template_override, str):
|
||||
server.chat_template = template_override
|
||||
server.start()
|
||||
body = server.make_any_request("POST", "/v1/chat/completions", data={
|
||||
"max_tokens": n_predict,
|
||||
"messages": [
|
||||
{"role": "user", "content": "What's the sum of 102 and 7?"},
|
||||
],
|
||||
"stream": stream == CompletionMode.STREAMED,
|
||||
}, timeout=TIMEOUT_HTTP_REQUEST)
|
||||
choice = body["choices"][0]
|
||||
assert choice["message"].get("tool_calls") is None, f'Expected no tool call in {choice["message"]}'
|
||||
|
||||
content = choice["message"].get("content")
|
||||
if expect_content is None:
|
||||
assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}'
|
||||
else:
|
||||
assert re.match(expect_content, content), f'Expected {expect_content}, got {content}'
|
||||
|
||||
reasoning_content = choice["message"].get("reasoning_content")
|
||||
if expect_reasoning_content is None:
|
||||
assert reasoning_content is None, f'Expected no reasoning content in {choice["message"]}'
|
||||
else:
|
||||
assert re.match(expect_reasoning_content, reasoning_content), f'Expected {expect_reasoning_content}, got {reasoning_content}'
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED])
|
||||
@pytest.mark.parametrize("hf_repo,template_override", [
|
||||
("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
|
||||
|
||||
("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
|
||||
("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai-functionary-medium-v3.2", None)),
|
||||
("bartowski/functionary-small-v3.2-GGUF:Q8_0", "chatml"),
|
||||
|
||||
# ("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
|
||||
("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)),
|
||||
("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", None),
|
||||
|
||||
("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)),
|
||||
("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", None),
|
||||
|
||||
("bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None),
|
||||
("bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
|
||||
("bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch-Hermes-3-Llama-3.1-8B", "tool_use")),
|
||||
("bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
|
||||
("bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"),
|
||||
|
||||
("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None),
|
||||
("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", "chatml"),
|
||||
])
|
||||
def test_hello_world(hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode):
|
||||
global server
|
||||
n_predict = 512 # High because of DeepSeek R1
|
||||
server.jinja = True
|
||||
server.n_ctx = 8192
|
||||
server.n_predict = n_predict
|
||||
server.model_hf_repo = hf_repo
|
||||
server.model_hf_file = None
|
||||
if isinstance(template_override, tuple):
|
||||
(template_hf_repo, template_variant) = template_override
|
||||
server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja"
|
||||
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
|
||||
elif isinstance(template_override, str):
|
||||
server.chat_template = template_override
|
||||
server.start(timeout_seconds=TIMEOUT_START_SLOW)
|
||||
|
||||
do_test_hello_world(server, stream=stream == CompletionMode.STREAMED, max_tokens=n_predict)
|
||||
|
||||
|
||||
def do_test_hello_world(server: ServerProcess, **kwargs):
|
||||
body = server.make_any_request("POST", "/v1/chat/completions", data={
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a tool-calling agent."},
|
||||
{"role": "user", "content": "say hello world with python"},
|
||||
],
|
||||
"tools": [PYTHON_TOOL],
|
||||
**kwargs,
|
||||
}, timeout=TIMEOUT_HTTP_REQUEST)
|
||||
choice = body["choices"][0]
|
||||
tool_calls = choice["message"].get("tool_calls")
|
||||
assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}'
|
||||
tool_call = tool_calls[0]
|
||||
# assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}'
|
||||
assert tool_call["function"]["name"] == PYTHON_TOOL["function"]["name"]
|
||||
# assert len(tool_call.get("id", "")) > 0, f'Expected non empty tool call id in {tool_call}'
|
||||
actual_arguments = json.loads(tool_call["function"]["arguments"])
|
||||
assert 'code' in actual_arguments, f"code not found in {json.dumps(actual_arguments)}"
|
||||
code = actual_arguments["code"]
|
||||
assert isinstance(code, str), f"Expected code to be a string, got {type(code)}: {json.dumps(code)}"
|
||||
assert re.match(r'''print\(("[Hh]ello,? [Ww]orld!?"|'[Hh]ello,? [Ww]orld!?')\)''', re.sub(r'#.*\n?', '', code)), f'Expected hello world, got {code}'
|
||||
160
tools/server/tests/unit/test_vision_api.py
Normal file
@@ -0,0 +1,160 @@
|
||||
import pytest
|
||||
from utils import *
|
||||
import base64
|
||||
import requests
|
||||
|
||||
server: ServerProcess
|
||||
|
||||
def get_img_url(id: str) -> str:
|
||||
IMG_URL_0 = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/11_truck.png"
|
||||
IMG_URL_1 = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/91_cat.png"
|
||||
if id == "IMG_URL_0":
|
||||
return IMG_URL_0
|
||||
elif id == "IMG_URL_1":
|
||||
return IMG_URL_1
|
||||
elif id == "IMG_BASE64_URI_0":
|
||||
response = requests.get(IMG_URL_0)
|
||||
response.raise_for_status() # Raise an exception for bad status codes
|
||||
return "data:image/png;base64," + base64.b64encode(response.content).decode("utf-8")
|
||||
elif id == "IMG_BASE64_0":
|
||||
response = requests.get(IMG_URL_0)
|
||||
response.raise_for_status() # Raise an exception for bad status codes
|
||||
return base64.b64encode(response.content).decode("utf-8")
|
||||
elif id == "IMG_BASE64_URI_1":
|
||||
response = requests.get(IMG_URL_1)
|
||||
response.raise_for_status() # Raise an exception for bad status codes
|
||||
return "data:image/png;base64," + base64.b64encode(response.content).decode("utf-8")
|
||||
elif id == "IMG_BASE64_1":
|
||||
response = requests.get(IMG_URL_1)
|
||||
response.raise_for_status() # Raise an exception for bad status codes
|
||||
return base64.b64encode(response.content).decode("utf-8")
|
||||
else:
|
||||
return id
|
||||
|
||||
JSON_MULTIMODAL_KEY = "multimodal_data"
|
||||
JSON_PROMPT_STRING_KEY = "prompt_string"
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def create_server():
|
||||
global server
|
||||
server = ServerPreset.tinygemma3()
|
||||
|
||||
def test_models_supports_multimodal_capability():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("GET", "/models", data={})
|
||||
assert res.status_code == 200
|
||||
model_info = res.body["models"][0]
|
||||
print(model_info)
|
||||
assert "completion" in model_info["capabilities"]
|
||||
assert "multimodal" in model_info["capabilities"]
|
||||
|
||||
def test_v1_models_supports_multimodal_capability():
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("GET", "/v1/models", data={})
|
||||
assert res.status_code == 200
|
||||
model_info = res.body["models"][0]
|
||||
print(model_info)
|
||||
assert "completion" in model_info["capabilities"]
|
||||
assert "multimodal" in model_info["capabilities"]
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"prompt, image_url, success, re_content",
|
||||
[
|
||||
# test model is trained on CIFAR-10, but it's quite dumb due to small size
|
||||
("What is this:\n", "IMG_URL_0", True, "(cat)+"),
|
||||
("What is this:\n", "IMG_BASE64_URI_0", True, "(cat)+"),
|
||||
("What is this:\n", "IMG_URL_1", True, "(frog)+"),
|
||||
("Test test\n", "IMG_URL_1", True, "(frog)+"), # test invalidate cache
|
||||
("What is this:\n", "malformed", False, None),
|
||||
("What is this:\n", "https://google.com/404", False, None), # non-existent image
|
||||
("What is this:\n", "https://ggml.ai", False, None), # non-image data
|
||||
# TODO @ngxson : test with multiple images, no images and with audio
|
||||
]
|
||||
)
|
||||
def test_vision_chat_completion(prompt, image_url, success, re_content):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/chat/completions", data={
|
||||
"temperature": 0.0,
|
||||
"top_k": 1,
|
||||
"messages": [
|
||||
{"role": "user", "content": [
|
||||
{"type": "text", "text": prompt},
|
||||
{"type": "image_url", "image_url": {
|
||||
"url": get_img_url(image_url),
|
||||
}},
|
||||
]},
|
||||
],
|
||||
})
|
||||
if success:
|
||||
assert res.status_code == 200
|
||||
choice = res.body["choices"][0]
|
||||
assert "assistant" == choice["message"]["role"]
|
||||
assert match_regex(re_content, choice["message"]["content"])
|
||||
else:
|
||||
assert res.status_code != 200
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"prompt, image_data, success, re_content",
|
||||
[
|
||||
# test model is trained on CIFAR-10, but it's quite dumb due to small size
|
||||
("What is this: <__media__>\n", "IMG_BASE64_0", True, "(cat)+"),
|
||||
("What is this: <__media__>\n", "IMG_BASE64_1", True, "(frog)+"),
|
||||
("What is this: <__media__>\n", "malformed", False, None), # non-image data
|
||||
("What is this:\n", "", False, None), # empty string
|
||||
]
|
||||
)
|
||||
def test_vision_completion(prompt, image_data, success, re_content):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/completions", data={
|
||||
"temperature": 0.0,
|
||||
"top_k": 1,
|
||||
"prompt": {
|
||||
JSON_PROMPT_STRING_KEY: prompt,
|
||||
JSON_MULTIMODAL_KEY: [ get_img_url(image_data) ],
|
||||
},
|
||||
})
|
||||
if success:
|
||||
assert res.status_code == 200
|
||||
content = res.body["content"]
|
||||
assert match_regex(re_content, content)
|
||||
else:
|
||||
assert res.status_code != 200
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"prompt, image_data, success",
|
||||
[
|
||||
# test model is trained on CIFAR-10, but it's quite dumb due to small size
|
||||
("What is this: <__media__>\n", "IMG_BASE64_0", True),
|
||||
("What is this: <__media__>\n", "IMG_BASE64_1", True),
|
||||
("What is this: <__media__>\n", "malformed", False), # non-image data
|
||||
("What is this:\n", "base64", False), # non-image data
|
||||
]
|
||||
)
|
||||
def test_vision_embeddings(prompt, image_data, success):
|
||||
global server
|
||||
server.server_embeddings = True
|
||||
server.n_batch = 512
|
||||
server.start()
|
||||
image_data = get_img_url(image_data)
|
||||
res = server.make_request("POST", "/embeddings", data={
|
||||
"content": [
|
||||
{ JSON_PROMPT_STRING_KEY: prompt, JSON_MULTIMODAL_KEY: [ image_data ] },
|
||||
{ JSON_PROMPT_STRING_KEY: prompt, JSON_MULTIMODAL_KEY: [ image_data ] },
|
||||
{ JSON_PROMPT_STRING_KEY: prompt, },
|
||||
],
|
||||
})
|
||||
if success:
|
||||
assert res.status_code == 200
|
||||
content = res.body
|
||||
# Ensure embeddings are stable when multimodal.
|
||||
assert content[0]['embedding'] == content[1]['embedding']
|
||||
# Ensure embeddings without multimodal but same prompt do not match multimodal embeddings.
|
||||
assert content[0]['embedding'] != content[2]['embedding']
|
||||
else:
|
||||
assert res.status_code != 200
|
||||
643
tools/server/tests/utils.py
Normal file
@@ -0,0 +1,643 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# type: ignore[reportUnusedImport]
|
||||
|
||||
import subprocess
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
from json import JSONDecodeError
|
||||
import sys
|
||||
import requests
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
ContextManager,
|
||||
Iterable,
|
||||
Iterator,
|
||||
List,
|
||||
Literal,
|
||||
Tuple,
|
||||
Set,
|
||||
)
|
||||
from re import RegexFlag
|
||||
import wget
|
||||
|
||||
|
||||
DEFAULT_HTTP_TIMEOUT = 60
|
||||
|
||||
|
||||
class ServerResponse:
|
||||
headers: dict
|
||||
status_code: int
|
||||
body: dict | Any
|
||||
|
||||
|
||||
class ServerError(Exception):
|
||||
def __init__(self, code, body):
|
||||
self.code = code
|
||||
self.body = body
|
||||
|
||||
|
||||
class ServerProcess:
|
||||
# default options
|
||||
debug: bool = False
|
||||
server_port: int = 8080
|
||||
server_host: str = "127.0.0.1"
|
||||
model_hf_repo: str | None = "ggml-org/models"
|
||||
model_hf_file: str | None = "tinyllamas/stories260K.gguf"
|
||||
model_alias: str = "tinyllama-2"
|
||||
temperature: float = 0.8
|
||||
seed: int = 42
|
||||
offline: bool = False
|
||||
|
||||
# custom options
|
||||
model_alias: str | None = None
|
||||
model_url: str | None = None
|
||||
model_file: str | None = None
|
||||
model_draft: str | None = None
|
||||
n_threads: int | None = None
|
||||
n_gpu_layer: int | None = None
|
||||
n_batch: int | None = None
|
||||
n_ubatch: int | None = None
|
||||
n_ctx: int | None = None
|
||||
n_ga: int | None = None
|
||||
n_ga_w: int | None = None
|
||||
n_predict: int | None = None
|
||||
n_prompts: int | None = 0
|
||||
slot_save_path: str | None = None
|
||||
id_slot: int | None = None
|
||||
cache_prompt: bool | None = None
|
||||
n_slots: int | None = None
|
||||
ctk: str | None = None
|
||||
ctv: str | None = None
|
||||
fa: str | None = None
|
||||
server_continuous_batching: bool | None = False
|
||||
server_embeddings: bool | None = False
|
||||
server_reranking: bool | None = False
|
||||
server_metrics: bool | None = False
|
||||
kv_unified: bool | None = False
|
||||
server_slots: bool | None = False
|
||||
pooling: str | None = None
|
||||
draft: int | None = None
|
||||
api_key: str | None = None
|
||||
models_dir: str | None = None
|
||||
models_max: int | None = None
|
||||
no_models_autoload: bool | None = None
|
||||
lora_files: List[str] | None = None
|
||||
enable_ctx_shift: int | None = False
|
||||
draft_min: int | None = None
|
||||
draft_max: int | None = None
|
||||
no_webui: bool | None = None
|
||||
jinja: bool | None = None
|
||||
reasoning_format: Literal['deepseek', 'none', 'nothink'] | None = None
|
||||
reasoning_budget: int | None = None
|
||||
chat_template: str | None = None
|
||||
chat_template_file: str | None = None
|
||||
server_path: str | None = None
|
||||
mmproj_url: str | None = None
|
||||
media_path: str | None = None
|
||||
sleep_idle_seconds: int | None = None
|
||||
|
||||
# session variables
|
||||
process: subprocess.Popen | None = None
|
||||
|
||||
def __init__(self):
|
||||
if "N_GPU_LAYERS" in os.environ:
|
||||
self.n_gpu_layer = int(os.environ["N_GPU_LAYERS"])
|
||||
if "DEBUG" in os.environ:
|
||||
self.debug = True
|
||||
if "PORT" in os.environ:
|
||||
self.server_port = int(os.environ["PORT"])
|
||||
self.external_server = "DEBUG_EXTERNAL" in os.environ
|
||||
|
||||
def start(self, timeout_seconds: int | None = DEFAULT_HTTP_TIMEOUT) -> None:
|
||||
if self.external_server:
|
||||
print(f"[external_server]: Assuming external server running on {self.server_host}:{self.server_port}")
|
||||
return
|
||||
if self.server_path is not None:
|
||||
server_path = self.server_path
|
||||
elif "LLAMA_SERVER_BIN_PATH" in os.environ:
|
||||
server_path = os.environ["LLAMA_SERVER_BIN_PATH"]
|
||||
elif os.name == "nt":
|
||||
server_path = "../../../build/bin/Release/llama-server.exe"
|
||||
else:
|
||||
server_path = "../../../build/bin/llama-server"
|
||||
server_args = [
|
||||
"--host",
|
||||
self.server_host,
|
||||
"--port",
|
||||
self.server_port,
|
||||
"--temp",
|
||||
self.temperature,
|
||||
"--seed",
|
||||
self.seed,
|
||||
]
|
||||
if self.offline:
|
||||
server_args.append("--offline")
|
||||
if self.model_file:
|
||||
server_args.extend(["--model", self.model_file])
|
||||
if self.model_url:
|
||||
server_args.extend(["--model-url", self.model_url])
|
||||
if self.model_draft:
|
||||
server_args.extend(["--model-draft", self.model_draft])
|
||||
if self.model_hf_repo:
|
||||
server_args.extend(["--hf-repo", self.model_hf_repo])
|
||||
if self.model_hf_file:
|
||||
server_args.extend(["--hf-file", self.model_hf_file])
|
||||
if self.models_dir:
|
||||
server_args.extend(["--models-dir", self.models_dir])
|
||||
if self.models_max is not None:
|
||||
server_args.extend(["--models-max", self.models_max])
|
||||
if self.n_batch:
|
||||
server_args.extend(["--batch-size", self.n_batch])
|
||||
if self.n_ubatch:
|
||||
server_args.extend(["--ubatch-size", self.n_ubatch])
|
||||
if self.n_threads:
|
||||
server_args.extend(["--threads", self.n_threads])
|
||||
if self.n_gpu_layer:
|
||||
server_args.extend(["--n-gpu-layers", self.n_gpu_layer])
|
||||
if self.draft is not None:
|
||||
server_args.extend(["--draft", self.draft])
|
||||
if self.server_continuous_batching:
|
||||
server_args.append("--cont-batching")
|
||||
if self.server_embeddings:
|
||||
server_args.append("--embedding")
|
||||
if self.server_reranking:
|
||||
server_args.append("--reranking")
|
||||
if self.server_metrics:
|
||||
server_args.append("--metrics")
|
||||
if self.kv_unified:
|
||||
server_args.append("--kv-unified")
|
||||
if self.server_slots:
|
||||
server_args.append("--slots")
|
||||
else:
|
||||
server_args.append("--no-slots")
|
||||
if self.pooling:
|
||||
server_args.extend(["--pooling", self.pooling])
|
||||
if self.model_alias:
|
||||
server_args.extend(["--alias", self.model_alias])
|
||||
if self.n_ctx:
|
||||
server_args.extend(["--ctx-size", self.n_ctx])
|
||||
if self.n_slots:
|
||||
server_args.extend(["--parallel", self.n_slots])
|
||||
if self.ctk:
|
||||
server_args.extend(["-ctk", self.ctk])
|
||||
if self.ctv:
|
||||
server_args.extend(["-ctv", self.ctv])
|
||||
if self.fa is not None:
|
||||
server_args.extend(["-fa", self.fa])
|
||||
if self.n_predict:
|
||||
server_args.extend(["--n-predict", self.n_predict])
|
||||
if self.slot_save_path:
|
||||
server_args.extend(["--slot-save-path", self.slot_save_path])
|
||||
if self.n_ga:
|
||||
server_args.extend(["--grp-attn-n", self.n_ga])
|
||||
if self.n_ga_w:
|
||||
server_args.extend(["--grp-attn-w", self.n_ga_w])
|
||||
if self.debug:
|
||||
server_args.append("--verbose")
|
||||
if self.lora_files:
|
||||
for lora_file in self.lora_files:
|
||||
server_args.extend(["--lora", lora_file])
|
||||
if self.enable_ctx_shift:
|
||||
server_args.append("--context-shift")
|
||||
if self.api_key:
|
||||
server_args.extend(["--api-key", self.api_key])
|
||||
if self.draft_max:
|
||||
server_args.extend(["--draft-max", self.draft_max])
|
||||
if self.draft_min:
|
||||
server_args.extend(["--draft-min", self.draft_min])
|
||||
if self.no_webui:
|
||||
server_args.append("--no-webui")
|
||||
if self.no_models_autoload:
|
||||
server_args.append("--no-models-autoload")
|
||||
if self.jinja:
|
||||
server_args.append("--jinja")
|
||||
else:
|
||||
server_args.append("--no-jinja")
|
||||
if self.reasoning_format is not None:
|
||||
server_args.extend(("--reasoning-format", self.reasoning_format))
|
||||
if self.reasoning_budget is not None:
|
||||
server_args.extend(("--reasoning-budget", self.reasoning_budget))
|
||||
if self.chat_template:
|
||||
server_args.extend(["--chat-template", self.chat_template])
|
||||
if self.chat_template_file:
|
||||
server_args.extend(["--chat-template-file", self.chat_template_file])
|
||||
if self.mmproj_url:
|
||||
server_args.extend(["--mmproj-url", self.mmproj_url])
|
||||
if self.media_path:
|
||||
server_args.extend(["--media-path", self.media_path])
|
||||
if self.sleep_idle_seconds is not None:
|
||||
server_args.extend(["--sleep-idle-seconds", self.sleep_idle_seconds])
|
||||
|
||||
args = [str(arg) for arg in [server_path, *server_args]]
|
||||
print(f"tests: starting server with: {' '.join(args)}")
|
||||
|
||||
flags = 0
|
||||
if "nt" == os.name:
|
||||
flags |= subprocess.DETACHED_PROCESS
|
||||
flags |= subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
flags |= subprocess.CREATE_NO_WINDOW
|
||||
|
||||
self.process = subprocess.Popen(
|
||||
[str(arg) for arg in [server_path, *server_args]],
|
||||
creationflags=flags,
|
||||
stdout=sys.stdout,
|
||||
stderr=sys.stdout,
|
||||
env={**os.environ, "LLAMA_CACHE": "tmp"} if "LLAMA_CACHE" not in os.environ else None,
|
||||
)
|
||||
server_instances.add(self)
|
||||
|
||||
print(f"server pid={self.process.pid}, pytest pid={os.getpid()}")
|
||||
|
||||
# wait for server to start
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < timeout_seconds:
|
||||
try:
|
||||
response = self.make_request("GET", "/health", headers={
|
||||
"Authorization": f"Bearer {self.api_key}" if self.api_key else None
|
||||
})
|
||||
if response.status_code == 200:
|
||||
self.ready = True
|
||||
return # server is ready
|
||||
except Exception as e:
|
||||
pass
|
||||
# Check if process died
|
||||
if self.process.poll() is not None:
|
||||
raise RuntimeError(f"Server process died with return code {self.process.returncode}")
|
||||
|
||||
print(f"Waiting for server to start...")
|
||||
time.sleep(0.5)
|
||||
raise TimeoutError(f"Server did not start within {timeout_seconds} seconds")
|
||||
|
||||
def stop(self) -> None:
|
||||
if self.external_server:
|
||||
print("[external_server]: Not stopping external server")
|
||||
return
|
||||
if self in server_instances:
|
||||
server_instances.remove(self)
|
||||
if self.process:
|
||||
print(f"Stopping server with pid={self.process.pid}")
|
||||
self.process.kill()
|
||||
self.process = None
|
||||
|
||||
def make_request(
|
||||
self,
|
||||
method: str,
|
||||
path: str,
|
||||
data: dict | Any | None = None,
|
||||
headers: dict | None = None,
|
||||
timeout: float | None = None,
|
||||
) -> ServerResponse:
|
||||
url = f"http://{self.server_host}:{self.server_port}{path}"
|
||||
parse_body = False
|
||||
if method == "GET":
|
||||
response = requests.get(url, headers=headers, timeout=timeout)
|
||||
parse_body = True
|
||||
elif method == "POST":
|
||||
response = requests.post(url, headers=headers, json=data, timeout=timeout)
|
||||
parse_body = True
|
||||
elif method == "OPTIONS":
|
||||
response = requests.options(url, headers=headers, timeout=timeout)
|
||||
else:
|
||||
raise ValueError(f"Unimplemented method: {method}")
|
||||
result = ServerResponse()
|
||||
result.headers = dict(response.headers)
|
||||
result.status_code = response.status_code
|
||||
if parse_body:
|
||||
try:
|
||||
result.body = response.json()
|
||||
except JSONDecodeError:
|
||||
result.body = response.text
|
||||
else:
|
||||
result.body = None
|
||||
print("Response from server", json.dumps(result.body, indent=2))
|
||||
return result
|
||||
|
||||
def make_stream_request(
|
||||
self,
|
||||
method: str,
|
||||
path: str,
|
||||
data: dict | None = None,
|
||||
headers: dict | None = None,
|
||||
) -> Iterator[dict]:
|
||||
url = f"http://{self.server_host}:{self.server_port}{path}"
|
||||
if method == "POST":
|
||||
response = requests.post(url, headers=headers, json=data, stream=True)
|
||||
else:
|
||||
raise ValueError(f"Unimplemented method: {method}")
|
||||
if response.status_code != 200:
|
||||
raise ServerError(response.status_code, response.json())
|
||||
for line_bytes in response.iter_lines():
|
||||
line = line_bytes.decode("utf-8")
|
||||
if '[DONE]' in line:
|
||||
break
|
||||
elif line.startswith('data: '):
|
||||
data = json.loads(line[6:])
|
||||
print("Partial response from server", json.dumps(data, indent=2))
|
||||
yield data
|
||||
|
||||
def make_any_request(
|
||||
self,
|
||||
method: str,
|
||||
path: str,
|
||||
data: dict | None = None,
|
||||
headers: dict | None = None,
|
||||
timeout: float | None = None,
|
||||
) -> dict:
|
||||
stream = data.get('stream', False)
|
||||
if stream:
|
||||
content: list[str] = []
|
||||
reasoning_content: list[str] = []
|
||||
tool_calls: list[dict] = []
|
||||
finish_reason: Optional[str] = None
|
||||
|
||||
content_parts = 0
|
||||
reasoning_content_parts = 0
|
||||
tool_call_parts = 0
|
||||
arguments_parts = 0
|
||||
|
||||
for chunk in self.make_stream_request(method, path, data, headers):
|
||||
if chunk['choices']:
|
||||
assert len(chunk['choices']) == 1, f'Expected 1 choice, got {len(chunk["choices"])}'
|
||||
choice = chunk['choices'][0]
|
||||
if choice['delta'].get('content') is not None:
|
||||
assert len(choice['delta']['content']) > 0, f'Expected non empty content delta!'
|
||||
content.append(choice['delta']['content'])
|
||||
content_parts += 1
|
||||
if choice['delta'].get('reasoning_content') is not None:
|
||||
assert len(choice['delta']['reasoning_content']) > 0, f'Expected non empty reasoning_content delta!'
|
||||
reasoning_content.append(choice['delta']['reasoning_content'])
|
||||
reasoning_content_parts += 1
|
||||
if choice['delta'].get('finish_reason') is not None:
|
||||
finish_reason = choice['delta']['finish_reason']
|
||||
for tc in choice['delta'].get('tool_calls', []):
|
||||
if 'function' not in tc:
|
||||
raise ValueError(f"Expected function type, got {tc['type']}")
|
||||
if tc['index'] >= len(tool_calls):
|
||||
assert 'id' in tc
|
||||
assert tc.get('type') == 'function'
|
||||
assert 'function' in tc and 'name' in tc['function'] and len(tc['function']['name']) > 0, \
|
||||
f"Expected function call with name, got {tc.get('function')}"
|
||||
tool_calls.append(dict(
|
||||
id="",
|
||||
type="function",
|
||||
function=dict(
|
||||
name="",
|
||||
arguments="",
|
||||
)
|
||||
))
|
||||
tool_call = tool_calls[tc['index']]
|
||||
if tc.get('id') is not None:
|
||||
tool_call['id'] = tc['id']
|
||||
fct = tc['function']
|
||||
assert 'id' not in fct, f"Function call should not have id: {fct}"
|
||||
if fct.get('name') is not None:
|
||||
tool_call['function']['name'] = tool_call['function'].get('name', '') + fct['name']
|
||||
if fct.get('arguments') is not None:
|
||||
tool_call['function']['arguments'] += fct['arguments']
|
||||
arguments_parts += 1
|
||||
tool_call_parts += 1
|
||||
else:
|
||||
# When `include_usage` is True (the default), we expect the last chunk of the stream
|
||||
# immediately preceding the `data: [DONE]` message to contain a `choices` field with an empty array
|
||||
# and a `usage` field containing the usage statistics (n.b., llama-server also returns `timings` in
|
||||
# the last chunk)
|
||||
assert 'usage' in chunk, f"Expected finish_reason in chunk: {chunk}"
|
||||
assert 'timings' in chunk, f"Expected finish_reason in chunk: {chunk}"
|
||||
print(f'Streamed response had {content_parts} content parts, {reasoning_content_parts} reasoning_content parts, {tool_call_parts} tool call parts incl. {arguments_parts} arguments parts')
|
||||
result = dict(
|
||||
choices=[
|
||||
dict(
|
||||
index=0,
|
||||
finish_reason=finish_reason,
|
||||
message=dict(
|
||||
role='assistant',
|
||||
content=''.join(content) if content else None,
|
||||
reasoning_content=''.join(reasoning_content) if reasoning_content else None,
|
||||
tool_calls=tool_calls if tool_calls else None,
|
||||
),
|
||||
)
|
||||
],
|
||||
)
|
||||
print("Final response from server", json.dumps(result, indent=2))
|
||||
return result
|
||||
else:
|
||||
response = self.make_request(method, path, data, headers, timeout=timeout)
|
||||
assert response.status_code == 200, f"Server returned error: {response.status_code}"
|
||||
return response.body
|
||||
|
||||
|
||||
|
||||
server_instances: Set[ServerProcess] = set()
|
||||
|
||||
|
||||
class ServerPreset:
|
||||
@staticmethod
|
||||
def load_all() -> None:
|
||||
""" Load all server presets to ensure model files are cached. """
|
||||
servers: List[ServerProcess] = [
|
||||
method()
|
||||
for name, method in ServerPreset.__dict__.items()
|
||||
if callable(method) and name != "load_all"
|
||||
]
|
||||
for server in servers:
|
||||
server.offline = False
|
||||
server.start()
|
||||
server.stop()
|
||||
|
||||
@staticmethod
|
||||
def tinyllama2() -> ServerProcess:
|
||||
server = ServerProcess()
|
||||
server.offline = True # will be downloaded by load_all()
|
||||
server.model_hf_repo = "ggml-org/test-model-stories260K"
|
||||
server.model_hf_file = None
|
||||
server.model_alias = "tinyllama-2"
|
||||
server.n_ctx = 512
|
||||
server.n_batch = 32
|
||||
server.n_slots = 2
|
||||
server.n_predict = 64
|
||||
server.seed = 42
|
||||
return server
|
||||
|
||||
@staticmethod
|
||||
def bert_bge_small() -> ServerProcess:
|
||||
server = ServerProcess()
|
||||
server.offline = True # will be downloaded by load_all()
|
||||
server.model_hf_repo = "ggml-org/models"
|
||||
server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf"
|
||||
server.model_alias = "bert-bge-small"
|
||||
server.n_ctx = 512
|
||||
server.n_batch = 128
|
||||
server.n_ubatch = 128
|
||||
server.n_slots = 2
|
||||
server.seed = 42
|
||||
server.server_embeddings = True
|
||||
return server
|
||||
|
||||
@staticmethod
|
||||
def bert_bge_small_with_fa() -> ServerProcess:
|
||||
server = ServerProcess()
|
||||
server.offline = True # will be downloaded by load_all()
|
||||
server.model_hf_repo = "ggml-org/models"
|
||||
server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf"
|
||||
server.model_alias = "bert-bge-small"
|
||||
server.n_ctx = 1024
|
||||
server.n_batch = 300
|
||||
server.n_ubatch = 300
|
||||
server.n_slots = 2
|
||||
server.fa = "on"
|
||||
server.seed = 42
|
||||
server.server_embeddings = True
|
||||
return server
|
||||
|
||||
@staticmethod
|
||||
def tinyllama_infill() -> ServerProcess:
|
||||
server = ServerProcess()
|
||||
server.offline = True # will be downloaded by load_all()
|
||||
server.model_hf_repo = "ggml-org/test-model-stories260K-infill"
|
||||
server.model_hf_file = None
|
||||
server.model_alias = "tinyllama-infill"
|
||||
server.n_ctx = 2048
|
||||
server.n_batch = 1024
|
||||
server.n_slots = 1
|
||||
server.n_predict = 64
|
||||
server.temperature = 0.0
|
||||
server.seed = 42
|
||||
return server
|
||||
|
||||
@staticmethod
|
||||
def stories15m_moe() -> ServerProcess:
|
||||
server = ServerProcess()
|
||||
server.offline = True # will be downloaded by load_all()
|
||||
server.model_hf_repo = "ggml-org/stories15M_MOE"
|
||||
server.model_hf_file = "stories15M_MOE-F16.gguf"
|
||||
server.model_alias = "stories15m-moe"
|
||||
server.n_ctx = 2048
|
||||
server.n_batch = 1024
|
||||
server.n_slots = 1
|
||||
server.n_predict = 64
|
||||
server.temperature = 0.0
|
||||
server.seed = 42
|
||||
return server
|
||||
|
||||
@staticmethod
|
||||
def jina_reranker_tiny() -> ServerProcess:
|
||||
server = ServerProcess()
|
||||
server.offline = True # will be downloaded by load_all()
|
||||
server.model_hf_repo = "ggml-org/models"
|
||||
server.model_hf_file = "jina-reranker-v1-tiny-en/ggml-model-f16.gguf"
|
||||
server.model_alias = "jina-reranker"
|
||||
server.n_ctx = 512
|
||||
server.n_batch = 512
|
||||
server.n_slots = 1
|
||||
server.seed = 42
|
||||
server.server_reranking = True
|
||||
return server
|
||||
|
||||
@staticmethod
|
||||
def tinygemma3() -> ServerProcess:
|
||||
server = ServerProcess()
|
||||
server.offline = True # will be downloaded by load_all()
|
||||
# mmproj is already provided by HF registry API
|
||||
server.model_hf_file = None
|
||||
server.model_hf_repo = "ggml-org/tinygemma3-GGUF:Q8_0"
|
||||
server.model_alias = "tinygemma3"
|
||||
server.n_ctx = 1024
|
||||
server.n_batch = 32
|
||||
server.n_slots = 2
|
||||
server.n_predict = 4
|
||||
server.seed = 42
|
||||
return server
|
||||
|
||||
@staticmethod
|
||||
def router() -> ServerProcess:
|
||||
server = ServerProcess()
|
||||
server.offline = True # will be downloaded by load_all()
|
||||
# router server has no models
|
||||
server.model_file = None
|
||||
server.model_alias = None
|
||||
server.model_hf_repo = None
|
||||
server.model_hf_file = None
|
||||
server.n_ctx = 1024
|
||||
server.n_batch = 16
|
||||
server.n_slots = 1
|
||||
server.n_predict = 16
|
||||
server.seed = 42
|
||||
return server
|
||||
|
||||
|
||||
def parallel_function_calls(function_list: List[Tuple[Callable[..., Any], Tuple[Any, ...]]]) -> List[Any]:
|
||||
"""
|
||||
Run multiple functions in parallel and return results in the same order as calls. Equivalent to Promise.all in JS.
|
||||
|
||||
Example usage:
|
||||
|
||||
results = parallel_function_calls([
|
||||
(func1, (arg1, arg2)),
|
||||
(func2, (arg3, arg4)),
|
||||
])
|
||||
"""
|
||||
results = [None] * len(function_list)
|
||||
exceptions = []
|
||||
|
||||
def worker(index, func, args):
|
||||
try:
|
||||
result = func(*args)
|
||||
results[index] = result
|
||||
except Exception as e:
|
||||
exceptions.append((index, str(e)))
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
futures = []
|
||||
for i, (func, args) in enumerate(function_list):
|
||||
future = executor.submit(worker, i, func, args)
|
||||
futures.append(future)
|
||||
|
||||
# Wait for all futures to complete
|
||||
for future in as_completed(futures):
|
||||
pass
|
||||
|
||||
# Check if there were any exceptions
|
||||
if exceptions:
|
||||
print("Exceptions occurred:")
|
||||
for index, error in exceptions:
|
||||
print(f"Function at index {index}: {error}")
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def match_regex(regex: str, text: str) -> bool:
|
||||
return (
|
||||
re.compile(
|
||||
regex, flags=RegexFlag.IGNORECASE | RegexFlag.MULTILINE | RegexFlag.DOTALL
|
||||
).search(text)
|
||||
is not None
|
||||
)
|
||||
|
||||
|
||||
def download_file(url: str, output_file_path: str | None = None) -> str:
|
||||
"""
|
||||
Download a file from a URL to a local path. If the file already exists, it will not be downloaded again.
|
||||
|
||||
output_file_path is the local path to save the downloaded file. If not provided, the file will be saved in the root directory.
|
||||
|
||||
Returns the local path of the downloaded file.
|
||||
"""
|
||||
file_name = url.split('/').pop()
|
||||
output_file = f'./tmp/{file_name}' if output_file_path is None else output_file_path
|
||||
if not os.path.exists(output_file):
|
||||
print(f"Downloading {url} to {output_file}")
|
||||
wget.download(url, out=output_file)
|
||||
print(f"Done downloading to {output_file}")
|
||||
else:
|
||||
print(f"File already exists at {output_file}")
|
||||
return output_file
|
||||
|
||||
|
||||
def is_slow_test_allowed():
|
||||
return os.environ.get("SLOW_TESTS") == "1" or os.environ.get("SLOW_TESTS") == "ON"
|
||||
5
tools/server/themes/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# LLaMA.cpp Server Wild Theme
|
||||
|
||||
Simple themes directory of sample "public" directories. To try any of these add --path to your run like `server --path=wild`.
|
||||
|
||||

|
||||
7
tools/server/themes/buttons-top/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# LLaMA.cpp Server Buttons Top Theme
|
||||
|
||||
Simple tweaks to the UI. Chat buttons at the top of the page instead of bottom so you can hit Stop instead of chasing it down the page.
|
||||
|
||||
To use simply run server with `--path=themes/buttons_top`
|
||||
|
||||

|
||||
BIN
tools/server/themes/buttons-top/buttons_top.png
Normal file
|
After Width: | Height: | Size: 117 KiB |
BIN
tools/server/themes/buttons-top/favicon.ico
Normal file
|
After Width: | Height: | Size: 4.0 KiB |
1052
tools/server/themes/buttons-top/index.html
Normal file
5
tools/server/themes/wild/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# LLaMA.cpp Server Wild Theme
|
||||
|
||||
Simple tweaks to the UI. To use simply run server with `--path=themes/wild`
|
||||
|
||||

|
||||
BIN
tools/server/themes/wild/favicon.ico
Normal file
|
After Width: | Height: | Size: 4.0 KiB |
1056
tools/server/themes/wild/index.html
Normal file
BIN
tools/server/themes/wild/llama_cpp.png
Normal file
|
After Width: | Height: | Size: 75 KiB |
BIN
tools/server/themes/wild/llamapattern.png
Normal file
|
After Width: | Height: | Size: 254 KiB |
BIN
tools/server/themes/wild/wild.png
Normal file
|
After Width: | Height: | Size: 485 KiB |
28
tools/server/webui/.gitignore
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
test-results
|
||||
node_modules
|
||||
|
||||
# Output
|
||||
.output
|
||||
.vercel
|
||||
.netlify
|
||||
.wrangler
|
||||
/.svelte-kit
|
||||
/build
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Env
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
!.env.test
|
||||
|
||||
# Vite
|
||||
vite.config.js.timestamp-*
|
||||
vite.config.ts.timestamp-*
|
||||
|
||||
*storybook.log
|
||||
storybook-static
|
||||
*.code-workspace
|
||||
1
tools/server/webui/.npmrc
Normal file
@@ -0,0 +1 @@
|
||||
engine-strict=true
|
||||
9
tools/server/webui/.prettierignore
Normal file
@@ -0,0 +1,9 @@
|
||||
# Package Managers
|
||||
package-lock.json
|
||||
pnpm-lock.yaml
|
||||
yarn.lock
|
||||
bun.lock
|
||||
bun.lockb
|
||||
|
||||
# Miscellaneous
|
||||
/static/
|
||||
16
tools/server/webui/.prettierrc
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"useTabs": true,
|
||||
"singleQuote": true,
|
||||
"trailingComma": "none",
|
||||
"printWidth": 100,
|
||||
"plugins": ["prettier-plugin-svelte", "prettier-plugin-tailwindcss"],
|
||||
"overrides": [
|
||||
{
|
||||
"files": "*.svelte",
|
||||
"options": {
|
||||
"parser": "svelte"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tailwindStylesheet": "./src/app.css"
|
||||
}
|
||||
36
tools/server/webui/.storybook/ModeWatcherDecorator.svelte
Normal file
@@ -0,0 +1,36 @@
|
||||
<script lang="ts">
|
||||
import { ModeWatcher } from 'mode-watcher';
|
||||
import { onMount } from 'svelte';
|
||||
|
||||
interface Props {
|
||||
children?: any;
|
||||
}
|
||||
|
||||
let { children }: Props = $props();
|
||||
|
||||
onMount(() => {
|
||||
const root = document.documentElement;
|
||||
const theme = localStorage.getItem('mode-watcher-mode') || 'system';
|
||||
|
||||
if (theme === 'dark') {
|
||||
root.classList.add('dark');
|
||||
} else if (theme === 'light') {
|
||||
root.classList.remove('dark');
|
||||
} else {
|
||||
const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
|
||||
if (prefersDark) {
|
||||
root.classList.add('dark');
|
||||
} else {
|
||||
root.classList.remove('dark');
|
||||
}
|
||||
}
|
||||
});
|
||||
</script>
|
||||
|
||||
<ModeWatcher />
|
||||
|
||||
{#if children}
|
||||
{@const Component = children}
|
||||
|
||||
<Component />
|
||||
{/if}
|
||||
@@ -0,0 +1,13 @@
|
||||
<script lang="ts">
|
||||
import * as Tooltip from '../src/lib/components/ui/tooltip';
|
||||
|
||||
interface Props {
|
||||
children: any;
|
||||
}
|
||||
|
||||
let { children }: Props = $props();
|
||||
</script>
|
||||
|
||||
<Tooltip.Provider>
|
||||
{@render children()}
|
||||
</Tooltip.Provider>
|
||||
17
tools/server/webui/.storybook/main.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import type { StorybookConfig } from '@storybook/sveltekit';
|
||||
|
||||
const config: StorybookConfig = {
|
||||
stories: ['../tests/stories/**/*.mdx', '../tests/stories/**/*.stories.@(js|ts|svelte)'],
|
||||
addons: [
|
||||
'@storybook/addon-svelte-csf',
|
||||
'@chromatic-com/storybook',
|
||||
'@storybook/addon-docs',
|
||||
'@storybook/addon-a11y',
|
||||
'@storybook/addon-vitest'
|
||||
],
|
||||
framework: {
|
||||
name: '@storybook/sveltekit',
|
||||
options: {}
|
||||
}
|
||||
};
|
||||
export default config;
|
||||
42
tools/server/webui/.storybook/preview.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
import type { Preview } from '@storybook/sveltekit';
|
||||
import '../src/app.css';
|
||||
import ModeWatcherDecorator from './ModeWatcherDecorator.svelte';
|
||||
import TooltipProviderDecorator from './TooltipProviderDecorator.svelte';
|
||||
|
||||
const preview: Preview = {
|
||||
parameters: {
|
||||
controls: {
|
||||
matchers: {
|
||||
color: /(background|color)$/i,
|
||||
date: /Date$/i
|
||||
}
|
||||
},
|
||||
|
||||
backgrounds: {
|
||||
disable: true
|
||||
},
|
||||
|
||||
a11y: {
|
||||
// 'todo' - show a11y violations in the test UI only
|
||||
// 'error' - fail CI on a11y violations
|
||||
// 'off' - skip a11y checks entirely
|
||||
test: 'todo'
|
||||
}
|
||||
},
|
||||
decorators: [
|
||||
(story) => ({
|
||||
Component: ModeWatcherDecorator,
|
||||
props: {
|
||||
children: story
|
||||
}
|
||||
}),
|
||||
(story) => ({
|
||||
Component: TooltipProviderDecorator,
|
||||
props: {
|
||||
children: story
|
||||
}
|
||||
})
|
||||
]
|
||||
};
|
||||
|
||||
export default preview;
|
||||
12
tools/server/webui/.storybook/vitest.setup.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
import * as a11yAddonAnnotations from '@storybook/addon-a11y/preview';
|
||||
import { setProjectAnnotations } from '@storybook/sveltekit';
|
||||
import * as previewAnnotations from './preview';
|
||||
import { beforeAll } from 'vitest';
|
||||
|
||||
const project = setProjectAnnotations([a11yAddonAnnotations, previewAnnotations]);
|
||||
|
||||
beforeAll(async () => {
|
||||
if (project.beforeAll) {
|
||||
await project.beforeAll();
|
||||
}
|
||||
});
|
||||
687
tools/server/webui/README.md
Normal file
@@ -0,0 +1,687 @@
|
||||
# llama.cpp Web UI
|
||||
|
||||
A modern, feature-rich web interface for llama.cpp built with SvelteKit. This UI provides an intuitive chat interface with advanced file handling, conversation management, and comprehensive model interaction capabilities.
|
||||
|
||||
The WebUI supports two server operation modes:
|
||||
|
||||
- **MODEL mode** - Single model operation (standard llama-server)
|
||||
- **ROUTER mode** - Multi-model operation with dynamic model loading/unloading
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Features](#features)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Tech Stack](#tech-stack)
|
||||
- [Build Pipeline](#build-pipeline)
|
||||
- [Architecture](#architecture)
|
||||
- [Data Flows](#data-flows)
|
||||
- [Architectural Patterns](#architectural-patterns)
|
||||
- [Testing](#testing)
|
||||
|
||||
---
|
||||
|
||||
## Features
|
||||
|
||||
### Chat Interface
|
||||
|
||||
- **Streaming responses** with real-time updates
|
||||
- **Reasoning content** - Support for models with thinking/reasoning blocks
|
||||
- **Dark/light theme** with system preference detection
|
||||
- **Responsive design** for desktop and mobile
|
||||
|
||||
### File Attachments
|
||||
|
||||
- **Images** - JPEG, PNG, GIF, WebP, SVG (with PNG conversion)
|
||||
- **Documents** - PDF (text extraction or image conversion for vision models)
|
||||
- **Audio** - MP3, WAV for audio-capable models
|
||||
- **Text files** - Source code, markdown, and other text formats
|
||||
- **Drag-and-drop** and paste support with rich previews
|
||||
|
||||
### Conversation Management
|
||||
|
||||
- **Branching** - Branch messages conversations at any point by editing messages or regenerating responses, navigate between branches
|
||||
- **Regeneration** - Regenerate responses with optional model switching (ROUTER mode)
|
||||
- **Import/Export** - JSON format for backup and sharing
|
||||
- **Search** - Find conversations by title or content
|
||||
|
||||
### Advanced Rendering
|
||||
|
||||
- **Syntax highlighting** - Code blocks with language detection
|
||||
- **Math formulas** - KaTeX rendering for LaTeX expressions
|
||||
- **Markdown** - Full GFM support with tables, lists, and more
|
||||
|
||||
### Multi-Model Support (ROUTER mode)
|
||||
|
||||
- **Model selector** with Loaded/Available groups
|
||||
- **Automatic loading** - Models load on selection
|
||||
- **Modality validation** - Prevents sending images to non-vision models
|
||||
- **LRU unloading** - Server auto-manages model cache
|
||||
|
||||
### Keyboard Shortcuts
|
||||
|
||||
| Shortcut | Action |
|
||||
| ------------------ | -------------------- |
|
||||
| `Shift+Ctrl/Cmd+O` | New chat |
|
||||
| `Shift+Ctrl/Cmd+E` | Edit conversation |
|
||||
| `Shift+Ctrl/Cmd+D` | Delete conversation |
|
||||
| `Ctrl/Cmd+K` | Search conversations |
|
||||
| `Ctrl/Cmd+B` | Toggle sidebar |
|
||||
|
||||
### Developer Experience
|
||||
|
||||
- **Request tracking** - Monitor token generation with `/slots` endpoint
|
||||
- **Storybook** - Component library with visual testing
|
||||
- **Hot reload** - Instant updates during development
|
||||
|
||||
---
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- **Node.js** 18+ (20+ recommended)
|
||||
- **npm** 9+
|
||||
- **llama-server** running locally (for API access)
|
||||
|
||||
### 1. Install Dependencies
|
||||
|
||||
```bash
|
||||
cd tools/server/webui
|
||||
npm install
|
||||
```
|
||||
|
||||
### 2. Start llama-server
|
||||
|
||||
In a separate terminal, start the backend server:
|
||||
|
||||
```bash
|
||||
# Single model (MODEL mode)
|
||||
./llama-server -m model.gguf
|
||||
|
||||
# Multi-model (ROUTER mode)
|
||||
./llama-server --model-store /path/to/models
|
||||
```
|
||||
|
||||
### 3. Start Development Servers
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
|
||||
This starts:
|
||||
|
||||
- **Vite dev server** at `http://localhost:5173` - The main WebUI
|
||||
- **Storybook** at `http://localhost:6006` - Component documentation
|
||||
|
||||
The Vite dev server proxies API requests to `http://localhost:8080` (default llama-server port):
|
||||
|
||||
```typescript
|
||||
// vite.config.ts proxy configuration
|
||||
proxy: {
|
||||
'/v1': 'http://localhost:8080',
|
||||
'/props': 'http://localhost:8080',
|
||||
'/slots': 'http://localhost:8080',
|
||||
'/models': 'http://localhost:8080'
|
||||
}
|
||||
```
|
||||
|
||||
### Development Workflow
|
||||
|
||||
1. Open `http://localhost:5173` in your browser
|
||||
2. Make changes to `.svelte`, `.ts`, or `.css` files
|
||||
3. Changes hot-reload instantly
|
||||
4. Use Storybook at `http://localhost:6006` for isolated component development
|
||||
|
||||
---
|
||||
|
||||
## Tech Stack
|
||||
|
||||
| Layer | Technology | Purpose |
|
||||
| ----------------- | ------------------------------- | -------------------------------------------------------- |
|
||||
| **Framework** | SvelteKit + Svelte 5 | Reactive UI with runes (`$state`, `$derived`, `$effect`) |
|
||||
| **UI Components** | shadcn-svelte + bits-ui | Accessible, customizable component library |
|
||||
| **Styling** | TailwindCSS 4 | Utility-first CSS with design tokens |
|
||||
| **Database** | IndexedDB (Dexie) | Client-side storage for conversations and messages |
|
||||
| **Build** | Vite | Fast bundling with static adapter |
|
||||
| **Testing** | Playwright + Vitest + Storybook | E2E, unit, and visual testing |
|
||||
| **Markdown** | remark + rehype | Markdown processing with KaTeX and syntax highlighting |
|
||||
|
||||
### Key Dependencies
|
||||
|
||||
```json
|
||||
{
|
||||
"svelte": "^5.0.0",
|
||||
"bits-ui": "^2.8.11",
|
||||
"dexie": "^4.0.11",
|
||||
"pdfjs-dist": "^5.4.54",
|
||||
"highlight.js": "^11.11.1",
|
||||
"rehype-katex": "^7.0.1"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Build Pipeline
|
||||
|
||||
### Development Build
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Runs Vite in development mode with:
|
||||
|
||||
- Hot Module Replacement (HMR)
|
||||
- Source maps
|
||||
- Proxy to llama-server
|
||||
|
||||
### Production Build
|
||||
|
||||
```bash
|
||||
npm run build
|
||||
```
|
||||
|
||||
The build process:
|
||||
|
||||
1. **Vite Build** - Bundles all TypeScript, Svelte, and CSS
|
||||
2. **Static Adapter** - Outputs to `../public` (llama-server's static file directory)
|
||||
3. **Post-Build Script** - Cleans up intermediate files
|
||||
4. **Custom Plugin** - Creates `index.html.gz` with:
|
||||
- Inlined favicon as base64
|
||||
- GZIP compression (level 9)
|
||||
- Deterministic output (zeroed timestamps)
|
||||
|
||||
```text
|
||||
tools/server/webui/ → build → tools/server/public/
|
||||
├── src/ ├── index.html.gz (served by llama-server)
|
||||
├── static/ └── (favicon inlined)
|
||||
└── ...
|
||||
```
|
||||
|
||||
### SvelteKit Configuration
|
||||
|
||||
```javascript
|
||||
// svelte.config.js
|
||||
adapter: adapter({
|
||||
pages: '../public', // Output directory
|
||||
assets: '../public', // Static assets
|
||||
fallback: 'index.html', // SPA fallback
|
||||
strict: true
|
||||
}),
|
||||
output: {
|
||||
bundleStrategy: 'inline' // Single-file bundle
|
||||
}
|
||||
```
|
||||
|
||||
### Integration with llama-server
|
||||
|
||||
The WebUI is embedded directly into the llama-server binary:
|
||||
|
||||
1. `npm run build` outputs `index.html.gz` to `tools/server/public/`
|
||||
2. llama-server compiles this into the binary at build time
|
||||
3. When accessing `/`, llama-server serves the gzipped HTML
|
||||
4. All assets are inlined (CSS, JS, fonts, favicon)
|
||||
|
||||
This results in a **single portable binary** with the full WebUI included.
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
The WebUI follows a layered architecture with unidirectional data flow:
|
||||
|
||||
```text
|
||||
Routes → Components → Hooks → Stores → Services → Storage/API
|
||||
```
|
||||
|
||||
### High-Level Architecture
|
||||
|
||||
See: [`docs/architecture/high-level-architecture-simplified.md`](docs/architecture/high-level-architecture-simplified.md)
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph Routes["📍 Routes"]
|
||||
R1["/ (Welcome)"]
|
||||
R2["/chat/[id]"]
|
||||
RL["+layout.svelte"]
|
||||
end
|
||||
|
||||
subgraph Components["🧩 Components"]
|
||||
C_Sidebar["ChatSidebar"]
|
||||
C_Screen["ChatScreen"]
|
||||
C_Form["ChatForm"]
|
||||
C_Messages["ChatMessages"]
|
||||
C_ModelsSelector["ModelsSelector"]
|
||||
C_Settings["ChatSettings"]
|
||||
end
|
||||
|
||||
subgraph Stores["🗄️ Stores"]
|
||||
S1["chatStore"]
|
||||
S2["conversationsStore"]
|
||||
S3["modelsStore"]
|
||||
S4["serverStore"]
|
||||
S5["settingsStore"]
|
||||
end
|
||||
|
||||
subgraph Services["⚙️ Services"]
|
||||
SV1["ChatService"]
|
||||
SV2["ModelsService"]
|
||||
SV3["PropsService"]
|
||||
SV4["DatabaseService"]
|
||||
end
|
||||
|
||||
subgraph Storage["💾 Storage"]
|
||||
ST1["IndexedDB"]
|
||||
ST2["LocalStorage"]
|
||||
end
|
||||
|
||||
subgraph APIs["🌐 llama-server"]
|
||||
API1["/v1/chat/completions"]
|
||||
API2["/props"]
|
||||
API3["/models/*"]
|
||||
end
|
||||
|
||||
R1 & R2 --> C_Screen
|
||||
RL --> C_Sidebar
|
||||
C_Screen --> C_Form & C_Messages & C_Settings
|
||||
C_Screen --> S1 & S2
|
||||
C_ModelsSelector --> S3 & S4
|
||||
S1 --> SV1 & SV4
|
||||
S3 --> SV2 & SV3
|
||||
SV4 --> ST1
|
||||
SV1 --> API1
|
||||
SV2 --> API3
|
||||
SV3 --> API2
|
||||
```
|
||||
|
||||
### Layer Breakdown
|
||||
|
||||
#### Routes (`src/routes/`)
|
||||
|
||||
- **`/`** - Welcome screen, creates new conversation
|
||||
- **`/chat/[id]`** - Active chat interface
|
||||
- **`+layout.svelte`** - Sidebar, navigation, global initialization
|
||||
|
||||
#### Components (`src/lib/components/`)
|
||||
|
||||
Components are organized in `app/` (application-specific) and `ui/` (shadcn-svelte primitives).
|
||||
|
||||
**Chat Components** (`app/chat/`):
|
||||
|
||||
| Component | Responsibility |
|
||||
| ------------------ | --------------------------------------------------------------------------- |
|
||||
| `ChatScreen/` | Main chat container, coordinates message list, input form, and attachments |
|
||||
| `ChatForm/` | Message input textarea with file upload, paste handling, keyboard shortcuts |
|
||||
| `ChatMessages/` | Message list with branch navigation, regenerate/continue/edit actions |
|
||||
| `ChatAttachments/` | File attachment previews, drag-and-drop, PDF/image/audio handling |
|
||||
| `ChatSettings/` | Parameter sliders (temperature, top-p, etc.) with server default sync |
|
||||
| `ChatSidebar/` | Conversation list, search, import/export, navigation |
|
||||
|
||||
**Dialog Components** (`app/dialogs/`):
|
||||
|
||||
| Component | Responsibility |
|
||||
| ------------------------------- | -------------------------------------------------------- |
|
||||
| `DialogChatSettings` | Full-screen settings configuration |
|
||||
| `DialogModelInformation` | Model details (context size, modalities, parallel slots) |
|
||||
| `DialogChatAttachmentPreview` | Full preview for images, PDFs (text or page view), code |
|
||||
| `DialogConfirmation` | Generic confirmation for destructive actions |
|
||||
| `DialogConversationTitleUpdate` | Edit conversation title |
|
||||
|
||||
**Server/Model Components** (`app/server/`, `app/models/`):
|
||||
|
||||
| Component | Responsibility |
|
||||
| ------------------- | --------------------------------------------------------- |
|
||||
| `ServerErrorSplash` | Error display when server is unreachable |
|
||||
| `ModelsSelector` | Model dropdown with Loaded/Available groups (ROUTER mode) |
|
||||
|
||||
**Shared UI Components** (`app/misc/`):
|
||||
|
||||
| Component | Responsibility |
|
||||
| -------------------------------- | ---------------------------------------------------------------- |
|
||||
| `MarkdownContent` | Markdown rendering with KaTeX, syntax highlighting, copy buttons |
|
||||
| `SyntaxHighlightedCode` | Code blocks with language detection and highlighting |
|
||||
| `ActionButton`, `ActionDropdown` | Reusable action buttons and menus |
|
||||
| `BadgeModality`, `BadgeInfo` | Status and capability badges |
|
||||
|
||||
#### Hooks (`src/lib/hooks/`)
|
||||
|
||||
- **`useModelChangeValidation`** - Validates model switch against conversation modalities
|
||||
- **`useProcessingState`** - Tracks streaming progress and token generation
|
||||
|
||||
#### Stores (`src/lib/stores/`)
|
||||
|
||||
| Store | Responsibility |
|
||||
| -------------------- | --------------------------------------------------------- |
|
||||
| `chatStore` | Message sending, streaming, abort control, error handling |
|
||||
| `conversationsStore` | CRUD for conversations, message branching, navigation |
|
||||
| `modelsStore` | Model list, selection, loading/unloading (ROUTER) |
|
||||
| `serverStore` | Server properties, role detection, modalities |
|
||||
| `settingsStore` | User preferences, parameter sync with server defaults |
|
||||
|
||||
#### Services (`src/lib/services/`)
|
||||
|
||||
| Service | Responsibility |
|
||||
| ---------------------- | ----------------------------------------------- |
|
||||
| `ChatService` | API calls to`/v1/chat/completions`, SSE parsing |
|
||||
| `ModelsService` | `/models`, `/models/load`, `/models/unload` |
|
||||
| `PropsService` | `/props`, `/props?model=` |
|
||||
| `DatabaseService` | IndexedDB operations via Dexie |
|
||||
| `ParameterSyncService` | Syncs settings with server defaults |
|
||||
|
||||
---
|
||||
|
||||
## Data Flows
|
||||
|
||||
### MODEL Mode (Single Model)
|
||||
|
||||
See: [`docs/flows/data-flow-simplified-model-mode.md`](docs/flows/data-flow-simplified-model-mode.md)
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant UI
|
||||
participant Stores
|
||||
participant DB as IndexedDB
|
||||
participant API as llama-server
|
||||
|
||||
Note over User,API: Initialization
|
||||
UI->>Stores: initialize()
|
||||
Stores->>DB: load conversations
|
||||
Stores->>API: GET /props
|
||||
API-->>Stores: server config
|
||||
Stores->>API: GET /v1/models
|
||||
API-->>Stores: single model (auto-selected)
|
||||
|
||||
Note over User,API: Chat Flow
|
||||
User->>UI: send message
|
||||
Stores->>DB: save user message
|
||||
Stores->>API: POST /v1/chat/completions (stream)
|
||||
loop streaming
|
||||
API-->>Stores: SSE chunks
|
||||
Stores-->>UI: reactive update
|
||||
end
|
||||
Stores->>DB: save assistant message
|
||||
```
|
||||
|
||||
### ROUTER Mode (Multi-Model)
|
||||
|
||||
See: [`docs/flows/data-flow-simplified-router-mode.md`](docs/flows/data-flow-simplified-router-mode.md)
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant UI
|
||||
participant Stores
|
||||
participant API as llama-server
|
||||
|
||||
Note over User,API: Initialization
|
||||
Stores->>API: GET /props
|
||||
API-->>Stores: {role: "router"}
|
||||
Stores->>API: GET /models
|
||||
API-->>Stores: models[] with status
|
||||
|
||||
Note over User,API: Model Selection
|
||||
User->>UI: select model
|
||||
alt model not loaded
|
||||
Stores->>API: POST /models/load
|
||||
loop poll status
|
||||
Stores->>API: GET /models
|
||||
end
|
||||
Stores->>API: GET /props?model=X
|
||||
end
|
||||
Stores->>Stores: validate modalities
|
||||
|
||||
Note over User,API: Chat Flow
|
||||
Stores->>API: POST /v1/chat/completions {model: X}
|
||||
loop streaming
|
||||
API-->>Stores: SSE chunks + model info
|
||||
end
|
||||
```
|
||||
|
||||
### Detailed Flow Diagrams
|
||||
|
||||
| Flow | Description | File |
|
||||
| ------------- | ------------------------------------------ | ----------------------------------------------------------- |
|
||||
| Chat | Message lifecycle, streaming, regeneration | [`chat-flow.md`](docs/flows/chat-flow.md) |
|
||||
| Models | Loading, unloading, modality caching | [`models-flow.md`](docs/flows/models-flow.md) |
|
||||
| Server | Props fetching, role detection | [`server-flow.md`](docs/flows/server-flow.md) |
|
||||
| Conversations | CRUD, branching, import/export | [`conversations-flow.md`](docs/flows/conversations-flow.md) |
|
||||
| Database | IndexedDB schema, operations | [`database-flow.md`](docs/flows/database-flow.md) |
|
||||
| Settings | Parameter sync, user overrides | [`settings-flow.md`](docs/flows/settings-flow.md) |
|
||||
|
||||
---
|
||||
|
||||
## Architectural Patterns
|
||||
|
||||
### 1. Reactive State with Svelte 5 Runes
|
||||
|
||||
All stores use Svelte 5's fine-grained reactivity:
|
||||
|
||||
```typescript
|
||||
// Store with reactive state
|
||||
class ChatStore {
|
||||
#isLoading = $state(false);
|
||||
#currentResponse = $state('');
|
||||
|
||||
// Derived values auto-update
|
||||
get isStreaming() {
|
||||
return $derived(this.#isLoading && this.#currentResponse.length > 0);
|
||||
}
|
||||
}
|
||||
|
||||
// Exported reactive accessors
|
||||
export const isLoading = () => chatStore.isLoading;
|
||||
export const currentResponse = () => chatStore.currentResponse;
|
||||
```
|
||||
|
||||
### 2. Unidirectional Data Flow
|
||||
|
||||
Data flows in one direction, making state predictable:
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
subgraph UI["UI Layer"]
|
||||
A[User Action] --> B[Component]
|
||||
end
|
||||
|
||||
subgraph State["State Layer"]
|
||||
B --> C[Store Method]
|
||||
C --> D[State Update]
|
||||
end
|
||||
|
||||
subgraph IO["I/O Layer"]
|
||||
C --> E[Service]
|
||||
E --> F[API / IndexedDB]
|
||||
F -.->|Response| D
|
||||
end
|
||||
|
||||
D -->|Reactive| B
|
||||
```
|
||||
|
||||
Components dispatch actions to stores, stores coordinate with services for I/O, and state updates reactively propagate back to the UI.
|
||||
|
||||
### 3. Per-Conversation State
|
||||
|
||||
Enables concurrent streaming across multiple conversations:
|
||||
|
||||
```typescript
|
||||
class ChatStore {
|
||||
chatLoadingStates = new Map<string, boolean>();
|
||||
chatStreamingStates = new Map<string, { response: string; messageId: string }>();
|
||||
abortControllers = new Map<string, AbortController>();
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Message Branching with Tree Structure
|
||||
|
||||
Conversations are stored as a tree, not a linear list:
|
||||
|
||||
```typescript
|
||||
interface DatabaseMessage {
|
||||
id: string;
|
||||
parent: string | null; // Points to parent message
|
||||
children: string[]; // List of child message IDs
|
||||
// ...
|
||||
}
|
||||
|
||||
interface DatabaseConversation {
|
||||
currentNode: string; // Currently viewed branch tip
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Navigation between branches updates `currentNode` without losing history.
|
||||
|
||||
### 5. Layered Service Architecture
|
||||
|
||||
Stores handle state; services handle I/O:
|
||||
|
||||
```text
|
||||
┌─────────────────┐
|
||||
│ Stores │ Business logic, state management
|
||||
├─────────────────┤
|
||||
│ Services │ API calls, database operations
|
||||
├─────────────────┤
|
||||
│ Storage/API │ IndexedDB, LocalStorage, HTTP
|
||||
└─────────────────┘
|
||||
```
|
||||
|
||||
### 6. Server Role Abstraction
|
||||
|
||||
Single codebase handles both MODEL and ROUTER modes:
|
||||
|
||||
```typescript
|
||||
// serverStore.ts
|
||||
get isRouterMode() {
|
||||
return this.role === ServerRole.ROUTER;
|
||||
}
|
||||
|
||||
// Components conditionally render based on mode
|
||||
{#if isRouterMode()}
|
||||
<ModelsSelector />
|
||||
{/if}
|
||||
```
|
||||
|
||||
### 7. Modality Validation
|
||||
|
||||
Prevents sending attachments to incompatible models:
|
||||
|
||||
```typescript
|
||||
// useModelChangeValidation hook
|
||||
const validate = (modelId: string) => {
|
||||
const modelModalities = modelsStore.getModelModalities(modelId);
|
||||
const conversationModalities = conversationsStore.usedModalities;
|
||||
|
||||
// Check if model supports all used modalities
|
||||
if (conversationModalities.hasImages && !modelModalities.vision) {
|
||||
return { valid: false, reason: 'Model does not support images' };
|
||||
}
|
||||
// ...
|
||||
};
|
||||
```
|
||||
|
||||
### 8. Persistent Storage Strategy
|
||||
|
||||
Data is persisted across sessions using two storage mechanisms:
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph Browser["Browser Storage"]
|
||||
subgraph IDB["IndexedDB (Dexie)"]
|
||||
C[Conversations]
|
||||
M[Messages]
|
||||
end
|
||||
subgraph LS["LocalStorage"]
|
||||
S[Settings Config]
|
||||
O[User Overrides]
|
||||
T[Theme Preference]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph Stores["Svelte Stores"]
|
||||
CS[conversationsStore] --> C
|
||||
CS --> M
|
||||
SS[settingsStore] --> S
|
||||
SS --> O
|
||||
SS --> T
|
||||
end
|
||||
```
|
||||
|
||||
- **IndexedDB**: Conversations and messages (large, structured data)
|
||||
- **LocalStorage**: Settings, user parameter overrides, theme (small key-value data)
|
||||
- **Memory only**: Server props, model list (fetched fresh on each session)
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
### Test Types
|
||||
|
||||
| Type | Tool | Location | Command |
|
||||
| ------------- | ------------------ | ---------------- | ------------------- |
|
||||
| **Unit** | Vitest | `tests/unit/` | `npm run test:unit` |
|
||||
| **UI/Visual** | Storybook + Vitest | `tests/stories/` | `npm run test:ui` |
|
||||
| **E2E** | Playwright | `tests/e2e/` | `npm run test:e2e` |
|
||||
| **Client** | Vitest | `tests/client/`. | `npm run test:unit` |
|
||||
|
||||
### Running Tests
|
||||
|
||||
```bash
|
||||
# All tests
|
||||
npm run test
|
||||
|
||||
# Individual test suites
|
||||
npm run test:e2e # End-to-end (requires llama-server)
|
||||
npm run test:client # Client-side unit tests
|
||||
npm run test:server # Server-side unit tests
|
||||
npm run test:ui # Storybook visual tests
|
||||
```
|
||||
|
||||
### Storybook Development
|
||||
|
||||
```bash
|
||||
npm run storybook # Start Storybook dev server on :6006
|
||||
npm run build-storybook # Build static Storybook
|
||||
```
|
||||
|
||||
### Linting and Formatting
|
||||
|
||||
```bash
|
||||
npm run lint # Check code style
|
||||
npm run format # Auto-format with Prettier
|
||||
npm run check # TypeScript type checking
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Project Structure
|
||||
|
||||
```text
|
||||
tools/server/webui/
|
||||
├── src/
|
||||
│ ├── lib/
|
||||
│ │ ├── components/ # UI components (app/, ui/)
|
||||
│ │ ├── hooks/ # Svelte hooks
|
||||
│ │ ├── stores/ # State management
|
||||
│ │ ├── services/ # API and database services
|
||||
│ │ ├── types/ # TypeScript interfaces
|
||||
│ │ └── utils/ # Utility functions
|
||||
│ ├── routes/ # SvelteKit routes
|
||||
│ └── styles/ # Global styles
|
||||
├── static/ # Static assets
|
||||
├── tests/ # Test files
|
||||
├── docs/ # Architecture diagrams
|
||||
│ ├── architecture/ # High-level architecture
|
||||
│ └── flows/ # Feature-specific flows
|
||||
└── .storybook/ # Storybook configuration
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [llama.cpp Server README](../README.md) - Full server documentation
|
||||
- [Multimodal Documentation](../../../docs/multimodal.md) - Image and audio support
|
||||
- [Function Calling](../../../docs/function-calling.md) - Tool use capabilities
|
||||
16
tools/server/webui/components.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"$schema": "https://shadcn-svelte.com/schema.json",
|
||||
"tailwind": {
|
||||
"css": "src/app.css",
|
||||
"baseColor": "neutral"
|
||||
},
|
||||
"aliases": {
|
||||
"components": "$lib/components",
|
||||
"utils": "$lib/components/ui/utils",
|
||||
"ui": "$lib/components/ui",
|
||||
"hooks": "$lib/hooks",
|
||||
"lib": "$lib"
|
||||
},
|
||||
"typescript": true,
|
||||
"registry": "https://shadcn-svelte.com/registry"
|
||||
}
|
||||
@@ -0,0 +1,106 @@
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph Routes["📍 Routes"]
|
||||
R1["/ (Welcome)"]
|
||||
R2["/chat/[id]"]
|
||||
RL["+layout.svelte"]
|
||||
end
|
||||
|
||||
subgraph Components["🧩 Components"]
|
||||
C_Sidebar["ChatSidebar"]
|
||||
C_Screen["ChatScreen"]
|
||||
C_Form["ChatForm"]
|
||||
C_Messages["ChatMessages"]
|
||||
C_Message["ChatMessage"]
|
||||
C_MessageEditForm["ChatMessageEditForm"]
|
||||
C_ModelsSelector["ModelsSelector"]
|
||||
C_Settings["ChatSettings"]
|
||||
end
|
||||
|
||||
subgraph Hooks["🪝 Hooks"]
|
||||
H1["useModelChangeValidation"]
|
||||
H2["useProcessingState"]
|
||||
end
|
||||
|
||||
subgraph Stores["🗄️ Stores"]
|
||||
S1["chatStore<br/><i>Chat interactions & streaming</i>"]
|
||||
S2["conversationsStore<br/><i>Conversation data & messages</i>"]
|
||||
S3["modelsStore<br/><i>Model selection & loading</i>"]
|
||||
S4["serverStore<br/><i>Server props & role detection</i>"]
|
||||
S5["settingsStore<br/><i>User configuration</i>"]
|
||||
end
|
||||
|
||||
subgraph Services["⚙️ Services"]
|
||||
SV1["ChatService"]
|
||||
SV2["ModelsService"]
|
||||
SV3["PropsService"]
|
||||
SV4["DatabaseService"]
|
||||
SV5["ParameterSyncService"]
|
||||
end
|
||||
|
||||
subgraph Storage["💾 Storage"]
|
||||
ST1["IndexedDB<br/><i>conversations, messages</i>"]
|
||||
ST2["LocalStorage<br/><i>config, userOverrides</i>"]
|
||||
end
|
||||
|
||||
subgraph APIs["🌐 llama-server API"]
|
||||
API1["/v1/chat/completions"]
|
||||
API2["/props"]
|
||||
API3["/models/*"]
|
||||
API4["/v1/models"]
|
||||
end
|
||||
|
||||
%% Routes → Components
|
||||
R1 & R2 --> C_Screen
|
||||
RL --> C_Sidebar
|
||||
|
||||
%% Component hierarchy
|
||||
C_Screen --> C_Form & C_Messages & C_Settings
|
||||
C_Messages --> C_Message
|
||||
C_Message --> C_MessageEditForm
|
||||
C_Form & C_MessageEditForm --> C_ModelsSelector
|
||||
|
||||
%% Components → Hooks → Stores
|
||||
C_Form & C_Messages --> H1 & H2
|
||||
H1 --> S3 & S4
|
||||
H2 --> S1 & S5
|
||||
|
||||
%% Components → Stores
|
||||
C_Screen --> S1 & S2
|
||||
C_Sidebar --> S2
|
||||
C_ModelsSelector --> S3 & S4
|
||||
C_Settings --> S5
|
||||
|
||||
%% Stores → Services
|
||||
S1 --> SV1 & SV4
|
||||
S2 --> SV4
|
||||
S3 --> SV2 & SV3
|
||||
S4 --> SV3
|
||||
S5 --> SV5
|
||||
|
||||
%% Services → Storage
|
||||
SV4 --> ST1
|
||||
SV5 --> ST2
|
||||
|
||||
%% Services → APIs
|
||||
SV1 --> API1
|
||||
SV2 --> API3 & API4
|
||||
SV3 --> API2
|
||||
|
||||
%% Styling
|
||||
classDef routeStyle fill:#e1f5fe,stroke:#01579b,stroke-width:2px
|
||||
classDef componentStyle fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px
|
||||
classDef hookStyle fill:#fff8e1,stroke:#ff8f00,stroke-width:2px
|
||||
classDef storeStyle fill:#fff3e0,stroke:#e65100,stroke-width:2px
|
||||
classDef serviceStyle fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px
|
||||
classDef storageStyle fill:#fce4ec,stroke:#c2185b,stroke-width:2px
|
||||
classDef apiStyle fill:#e3f2fd,stroke:#1565c0,stroke-width:2px
|
||||
|
||||
class R1,R2,RL routeStyle
|
||||
class C_Sidebar,C_Screen,C_Form,C_Messages,C_Message,C_MessageEditForm,C_ModelsSelector,C_Settings componentStyle
|
||||
class H1,H2 hookStyle
|
||||
class S1,S2,S3,S4,S5 storeStyle
|
||||
class SV1,SV2,SV3,SV4,SV5 serviceStyle
|
||||
class ST1,ST2 storageStyle
|
||||
class API1,API2,API3,API4 apiStyle
|
||||
```
|
||||
279
tools/server/webui/docs/architecture/high-level-architecture.md
Normal file
@@ -0,0 +1,279 @@
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph Routes["📍 Routes"]
|
||||
R1["/ (+page.svelte)"]
|
||||
R2["/chat/[id]"]
|
||||
RL["+layout.svelte"]
|
||||
end
|
||||
|
||||
subgraph Components["🧩 Components"]
|
||||
direction TB
|
||||
subgraph LayoutComponents["Layout"]
|
||||
C_Sidebar["ChatSidebar"]
|
||||
C_Screen["ChatScreen"]
|
||||
end
|
||||
subgraph ChatUIComponents["Chat UI"]
|
||||
C_Form["ChatForm"]
|
||||
C_Messages["ChatMessages"]
|
||||
C_Message["ChatMessage"]
|
||||
C_MessageUser["ChatMessageUser"]
|
||||
C_MessageEditForm["ChatMessageEditForm"]
|
||||
C_Attach["ChatAttachments"]
|
||||
C_ModelsSelector["ModelsSelector"]
|
||||
C_Settings["ChatSettings"]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph Hooks["🪝 Hooks"]
|
||||
H1["useModelChangeValidation"]
|
||||
H2["useProcessingState"]
|
||||
H3["isMobile"]
|
||||
end
|
||||
|
||||
subgraph Stores["🗄️ Stores"]
|
||||
direction TB
|
||||
subgraph S1["chatStore"]
|
||||
S1State["<b>State:</b><br/>isLoading, currentResponse<br/>errorDialogState<br/>activeProcessingState<br/>chatLoadingStates<br/>chatStreamingStates<br/>abortControllers<br/>processingStates<br/>activeConversationId<br/>isStreamingActive"]
|
||||
S1LoadState["<b>Loading State:</b><br/>setChatLoading()<br/>isChatLoading()<br/>syncLoadingStateForChat()<br/>clearUIState()<br/>isChatLoadingPublic()<br/>getAllLoadingChats()<br/>getAllStreamingChats()"]
|
||||
S1ProcState["<b>Processing State:</b><br/>setActiveProcessingConversation()<br/>getProcessingState()<br/>clearProcessingState()<br/>getActiveProcessingState()<br/>updateProcessingStateFromTimings()<br/>getCurrentProcessingStateSync()<br/>restoreProcessingStateFromMessages()"]
|
||||
S1Stream["<b>Streaming:</b><br/>streamChatCompletion()<br/>startStreaming()<br/>stopStreaming()<br/>stopGeneration()<br/>isStreaming()"]
|
||||
S1Error["<b>Error Handling:</b><br/>showErrorDialog()<br/>dismissErrorDialog()<br/>isAbortError()"]
|
||||
S1Msg["<b>Message Operations:</b><br/>addMessage()<br/>sendMessage()<br/>updateMessage()<br/>deleteMessage()<br/>getDeletionInfo()"]
|
||||
S1Regen["<b>Regeneration:</b><br/>regenerateMessage()<br/>regenerateMessageWithBranching()<br/>continueAssistantMessage()"]
|
||||
S1Edit["<b>Editing:</b><br/>editAssistantMessage()<br/>editUserMessagePreserveResponses()<br/>editMessageWithBranching()<br/>clearEditMode()<br/>isEditModeActive()<br/>getAddFilesHandler()<br/>setEditModeActive()"]
|
||||
S1Utils["<b>Utilities:</b><br/>getApiOptions()<br/>parseTimingData()<br/>getOrCreateAbortController()<br/>getConversationModel()"]
|
||||
end
|
||||
subgraph S2["conversationsStore"]
|
||||
S2State["<b>State:</b><br/>conversations<br/>activeConversation<br/>activeMessages<br/>usedModalities<br/>isInitialized<br/>titleUpdateConfirmationCallback"]
|
||||
S2Modal["<b>Modalities:</b><br/>getModalitiesUpToMessage()<br/>calculateModalitiesFromMessages()"]
|
||||
S2Lifecycle["<b>Lifecycle:</b><br/>initialize()<br/>loadConversations()<br/>clearActiveConversation()"]
|
||||
S2ConvCRUD["<b>Conversation CRUD:</b><br/>createConversation()<br/>loadConversation()<br/>deleteConversation()<br/>updateConversationName()<br/>updateConversationTitleWithConfirmation()"]
|
||||
S2MsgMgmt["<b>Message Management:</b><br/>refreshActiveMessages()<br/>addMessageToActive()<br/>updateMessageAtIndex()<br/>findMessageIndex()<br/>sliceActiveMessages()<br/>removeMessageAtIndex()<br/>getConversationMessages()"]
|
||||
S2Nav["<b>Navigation:</b><br/>navigateToSibling()<br/>updateCurrentNode()<br/>updateConversationTimestamp()"]
|
||||
S2Export["<b>Import/Export:</b><br/>downloadConversation()<br/>exportAllConversations()<br/>importConversations()<br/>triggerDownload()"]
|
||||
S2Utils["<b>Utilities:</b><br/>setTitleUpdateConfirmationCallback()"]
|
||||
end
|
||||
subgraph S3["modelsStore"]
|
||||
S3State["<b>State:</b><br/>models, routerModels<br/>selectedModelId<br/>selectedModelName<br/>loading, updating, error<br/>modelLoadingStates<br/>modelPropsCache<br/>modelPropsFetching<br/>propsCacheVersion"]
|
||||
S3Getters["<b>Computed Getters:</b><br/>selectedModel<br/>loadedModelIds<br/>loadingModelIds<br/>singleModelName"]
|
||||
S3Modal["<b>Modalities:</b><br/>getModelModalities()<br/>modelSupportsVision()<br/>modelSupportsAudio()<br/>getModelModalitiesArray()<br/>getModelProps()<br/>updateModelModalities()"]
|
||||
S3Status["<b>Status Queries:</b><br/>isModelLoaded()<br/>isModelOperationInProgress()<br/>getModelStatus()<br/>isModelPropsFetching()"]
|
||||
S3Fetch["<b>Data Fetching:</b><br/>fetch()<br/>fetchRouterModels()<br/>fetchModelProps()<br/>fetchModalitiesForLoadedModels()"]
|
||||
S3Select["<b>Model Selection:</b><br/>selectModelById()<br/>selectModelByName()<br/>clearSelection()<br/>findModelByName()<br/>findModelById()<br/>hasModel()"]
|
||||
S3LoadUnload["<b>Loading/Unloading Models:</b><br/>loadModel()<br/>unloadModel()<br/>ensureModelLoaded()<br/>waitForModelStatus()<br/>pollForModelStatus()"]
|
||||
S3Utils["<b>Utilities:</b><br/>toDisplayName()<br/>clear()"]
|
||||
end
|
||||
subgraph S4["serverStore"]
|
||||
S4State["<b>State:</b><br/>props<br/>loading, error<br/>role<br/>fetchPromise"]
|
||||
S4Getters["<b>Getters:</b><br/>defaultParams<br/>contextSize<br/>isRouterMode<br/>isModelMode"]
|
||||
S4Data["<b>Data Handling:</b><br/>fetch()<br/>getErrorMessage()<br/>clear()"]
|
||||
S4Utils["<b>Utilities:</b><br/>detectRole()"]
|
||||
end
|
||||
subgraph S5["settingsStore"]
|
||||
S5State["<b>State:</b><br/>config<br/>theme<br/>isInitialized<br/>userOverrides"]
|
||||
S5Lifecycle["<b>Lifecycle:</b><br/>initialize()<br/>loadConfig()<br/>saveConfig()<br/>loadTheme()<br/>saveTheme()"]
|
||||
S5Update["<b>Config Updates:</b><br/>updateConfig()<br/>updateMultipleConfig()<br/>updateTheme()"]
|
||||
S5Reset["<b>Reset:</b><br/>resetConfig()<br/>resetTheme()<br/>resetAll()<br/>resetParameterToServerDefault()"]
|
||||
S5Sync["<b>Server Sync:</b><br/>syncWithServerDefaults()<br/>forceSyncWithServerDefaults()"]
|
||||
S5Utils["<b>Utilities:</b><br/>getConfig()<br/>getAllConfig()<br/>getParameterInfo()<br/>getParameterDiff()<br/>getServerDefaults()<br/>clearAllUserOverrides()"]
|
||||
end
|
||||
|
||||
subgraph ReactiveExports["⚡ Reactive Exports"]
|
||||
direction LR
|
||||
subgraph ChatExports["chatStore"]
|
||||
RE1["isLoading()"]
|
||||
RE2["currentResponse()"]
|
||||
RE3["errorDialog()"]
|
||||
RE4["activeProcessingState()"]
|
||||
RE5["isChatStreaming()"]
|
||||
RE6["isChatLoading()"]
|
||||
RE7["getChatStreaming()"]
|
||||
RE8["getAllLoadingChats()"]
|
||||
RE9["getAllStreamingChats()"]
|
||||
RE9a["isEditModeActive()"]
|
||||
RE9b["getAddFilesHandler()"]
|
||||
RE9c["setEditModeActive()"]
|
||||
RE9d["clearEditMode()"]
|
||||
end
|
||||
subgraph ConvExports["conversationsStore"]
|
||||
RE10["conversations()"]
|
||||
RE11["activeConversation()"]
|
||||
RE12["activeMessages()"]
|
||||
RE13["isConversationsInitialized()"]
|
||||
RE14["usedModalities()"]
|
||||
end
|
||||
subgraph ModelsExports["modelsStore"]
|
||||
RE15["modelOptions()"]
|
||||
RE16["routerModels()"]
|
||||
RE17["modelsLoading()"]
|
||||
RE18["modelsUpdating()"]
|
||||
RE19["modelsError()"]
|
||||
RE20["selectedModelId()"]
|
||||
RE21["selectedModelName()"]
|
||||
RE22["selectedModelOption()"]
|
||||
RE23["loadedModelIds()"]
|
||||
RE24["loadingModelIds()"]
|
||||
RE25["propsCacheVersion()"]
|
||||
RE26["singleModelName()"]
|
||||
end
|
||||
subgraph ServerExports["serverStore"]
|
||||
RE27["serverProps()"]
|
||||
RE28["serverLoading()"]
|
||||
RE29["serverError()"]
|
||||
RE30["serverRole()"]
|
||||
RE31["defaultParams()"]
|
||||
RE32["contextSize()"]
|
||||
RE33["isRouterMode()"]
|
||||
RE34["isModelMode()"]
|
||||
end
|
||||
subgraph SettingsExports["settingsStore"]
|
||||
RE35["config()"]
|
||||
RE36["theme()"]
|
||||
RE37["isInitialized()"]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
subgraph Services["⚙️ Services"]
|
||||
direction TB
|
||||
subgraph SV1["ChatService"]
|
||||
SV1Msg["<b>Messaging:</b><br/>sendMessage()"]
|
||||
SV1Stream["<b>Streaming:</b><br/>handleStreamResponse()<br/>parseSSEChunk()"]
|
||||
SV1Convert["<b>Conversion:</b><br/>convertMessageToChatData()<br/>convertExtraToApiFormat()"]
|
||||
SV1Utils["<b>Utilities:</b><br/>extractReasoningContent()<br/>getServerProps()<br/>getModels()"]
|
||||
end
|
||||
subgraph SV2["ModelsService"]
|
||||
SV2List["<b>Listing:</b><br/>list()<br/>listRouter()"]
|
||||
SV2LoadUnload["<b>Load/Unload:</b><br/>load()<br/>unload()"]
|
||||
SV2Status["<b>Status:</b><br/>isModelLoaded()<br/>isModelLoading()"]
|
||||
end
|
||||
subgraph SV3["PropsService"]
|
||||
SV3Fetch["<b>Fetching:</b><br/>fetch()<br/>fetchForModel()"]
|
||||
end
|
||||
subgraph SV4["DatabaseService"]
|
||||
SV4Conv["<b>Conversations:</b><br/>createConversation()<br/>getConversation()<br/>getAllConversations()<br/>updateConversation()<br/>deleteConversation()"]
|
||||
SV4Msg["<b>Messages:</b><br/>createMessageBranch()<br/>createRootMessage()<br/>getConversationMessages()<br/>updateMessage()<br/>deleteMessage()<br/>deleteMessageCascading()"]
|
||||
SV4Node["<b>Navigation:</b><br/>updateCurrentNode()"]
|
||||
SV4Import["<b>Import:</b><br/>importConversations()"]
|
||||
end
|
||||
subgraph SV5["ParameterSyncService"]
|
||||
SV5Extract["<b>Extraction:</b><br/>extractServerDefaults()"]
|
||||
SV5Merge["<b>Merging:</b><br/>mergeWithServerDefaults()"]
|
||||
SV5Info["<b>Info:</b><br/>getParameterInfo()<br/>canSyncParameter()<br/>getSyncableParameterKeys()<br/>validateServerParameter()"]
|
||||
SV5Diff["<b>Diff:</b><br/>createParameterDiff()"]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph Storage["💾 Storage"]
|
||||
ST1["IndexedDB"]
|
||||
ST2["conversations"]
|
||||
ST3["messages"]
|
||||
ST5["LocalStorage"]
|
||||
ST6["config"]
|
||||
ST7["userOverrides"]
|
||||
end
|
||||
|
||||
subgraph APIs["🌐 llama-server API"]
|
||||
API1["/v1/chat/completions"]
|
||||
API2["/props<br/>/props?model="]
|
||||
API3["/models<br/>/models/load<br/>/models/unload"]
|
||||
API4["/v1/models"]
|
||||
end
|
||||
|
||||
%% Routes render Components
|
||||
R1 --> C_Screen
|
||||
R2 --> C_Screen
|
||||
RL --> C_Sidebar
|
||||
|
||||
%% Component hierarchy
|
||||
C_Screen --> C_Form & C_Messages & C_Settings
|
||||
C_Messages --> C_Message
|
||||
C_Message --> C_MessageUser
|
||||
C_MessageUser --> C_MessageEditForm
|
||||
C_MessageEditForm --> C_ModelsSelector
|
||||
C_MessageEditForm --> C_Attach
|
||||
C_Form --> C_ModelsSelector
|
||||
C_Form --> C_Attach
|
||||
C_Message --> C_Attach
|
||||
|
||||
%% Components use Hooks
|
||||
C_Form --> H1
|
||||
C_Message --> H1 & H2
|
||||
C_MessageEditForm --> H1
|
||||
C_Screen --> H2
|
||||
|
||||
%% Hooks use Stores
|
||||
H1 --> S3 & S4
|
||||
H2 --> S1 & S5
|
||||
|
||||
%% Components use Stores
|
||||
C_Screen --> S1 & S2
|
||||
C_Messages --> S2
|
||||
C_Message --> S1 & S2 & S3
|
||||
C_Form --> S1 & S3
|
||||
C_Sidebar --> S2
|
||||
C_ModelsSelector --> S3 & S4
|
||||
C_Settings --> S5
|
||||
|
||||
%% Stores export Reactive State
|
||||
S1 -. exports .-> ChatExports
|
||||
S2 -. exports .-> ConvExports
|
||||
S3 -. exports .-> ModelsExports
|
||||
S4 -. exports .-> ServerExports
|
||||
S5 -. exports .-> SettingsExports
|
||||
|
||||
%% Stores use Services
|
||||
S1 --> SV1 & SV4
|
||||
S2 --> SV4
|
||||
S3 --> SV2 & SV3
|
||||
S4 --> SV3
|
||||
S5 --> SV5
|
||||
|
||||
%% Services to Storage
|
||||
SV4 --> ST1
|
||||
ST1 --> ST2 & ST3
|
||||
SV5 --> ST5
|
||||
ST5 --> ST6 & ST7
|
||||
|
||||
%% Services to APIs
|
||||
SV1 --> API1
|
||||
SV2 --> API3 & API4
|
||||
SV3 --> API2
|
||||
|
||||
%% Styling
|
||||
classDef routeStyle fill:#e1f5fe,stroke:#01579b,stroke-width:2px
|
||||
classDef componentStyle fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px
|
||||
classDef componentGroupStyle fill:#e1bee7,stroke:#7b1fa2,stroke-width:1px
|
||||
classDef storeStyle fill:#fff3e0,stroke:#e65100,stroke-width:2px
|
||||
classDef stateStyle fill:#ffe0b2,stroke:#e65100,stroke-width:1px
|
||||
classDef methodStyle fill:#ffecb3,stroke:#e65100,stroke-width:1px
|
||||
classDef reactiveStyle fill:#fffde7,stroke:#f9a825,stroke-width:1px
|
||||
classDef serviceStyle fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px
|
||||
classDef serviceMStyle fill:#c8e6c9,stroke:#2e7d32,stroke-width:1px
|
||||
classDef storageStyle fill:#fce4ec,stroke:#c2185b,stroke-width:2px
|
||||
classDef apiStyle fill:#e3f2fd,stroke:#1565c0,stroke-width:2px
|
||||
|
||||
class R1,R2,RL routeStyle
|
||||
class C_Sidebar,C_Screen,C_Form,C_Messages,C_Message,C_MessageUser,C_MessageEditForm componentStyle
|
||||
class C_ModelsSelector,C_Settings componentStyle
|
||||
class C_Attach componentStyle
|
||||
class H1,H2,H3 methodStyle
|
||||
class LayoutComponents,ChatUIComponents componentGroupStyle
|
||||
class Hooks storeStyle
|
||||
class S1,S2,S3,S4,S5 storeStyle
|
||||
class S1State,S2State,S3State,S4State,S5State stateStyle
|
||||
class S1Msg,S1Regen,S1Edit,S1Stream,S1LoadState,S1ProcState,S1Error,S1Utils methodStyle
|
||||
class S2Lifecycle,S2ConvCRUD,S2MsgMgmt,S2Nav,S2Modal,S2Export,S2Utils methodStyle
|
||||
class S3Getters,S3Modal,S3Status,S3Fetch,S3Select,S3LoadUnload,S3Utils methodStyle
|
||||
class S4Getters,S4Data,S4Utils methodStyle
|
||||
class S5Lifecycle,S5Update,S5Reset,S5Sync,S5Utils methodStyle
|
||||
class ChatExports,ConvExports,ModelsExports,ServerExports,SettingsExports reactiveStyle
|
||||
class SV1,SV2,SV3,SV4,SV5 serviceStyle
|
||||
class SV1Msg,SV1Stream,SV1Convert,SV1Utils serviceMStyle
|
||||
class SV2List,SV2LoadUnload,SV2Status serviceMStyle
|
||||
class SV3Fetch serviceMStyle
|
||||
class SV4Conv,SV4Msg,SV4Node,SV4Import serviceMStyle
|
||||
class SV5Extract,SV5Merge,SV5Info,SV5Diff serviceMStyle
|
||||
class ST1,ST2,ST3,ST5,ST6,ST7 storageStyle
|
||||
class API1,API2,API3,API4 apiStyle
|
||||
```
|
||||
174
tools/server/webui/docs/flows/chat-flow.md
Normal file
@@ -0,0 +1,174 @@
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant UI as 🧩 ChatForm / ChatMessage
|
||||
participant chatStore as 🗄️ chatStore
|
||||
participant convStore as 🗄️ conversationsStore
|
||||
participant settingsStore as 🗄️ settingsStore
|
||||
participant ChatSvc as ⚙️ ChatService
|
||||
participant DbSvc as ⚙️ DatabaseService
|
||||
participant API as 🌐 /v1/chat/completions
|
||||
|
||||
Note over chatStore: State:<br/>isLoading, currentResponse<br/>errorDialogState, activeProcessingState<br/>chatLoadingStates (Map)<br/>chatStreamingStates (Map)<br/>abortControllers (Map)<br/>processingStates (Map)
|
||||
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
Note over UI,API: 💬 SEND MESSAGE
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
UI->>chatStore: sendMessage(content, extras)
|
||||
activate chatStore
|
||||
|
||||
chatStore->>chatStore: setChatLoading(convId, true)
|
||||
chatStore->>chatStore: clearChatStreaming(convId)
|
||||
|
||||
alt no active conversation
|
||||
chatStore->>convStore: createConversation()
|
||||
Note over convStore: → see conversations-flow.mmd
|
||||
end
|
||||
|
||||
chatStore->>chatStore: addMessage("user", content, extras)
|
||||
chatStore->>DbSvc: createMessageBranch(userMsg, parentId)
|
||||
chatStore->>convStore: addMessageToActive(userMsg)
|
||||
chatStore->>convStore: updateCurrentNode(userMsg.id)
|
||||
|
||||
chatStore->>chatStore: createAssistantMessage(userMsg.id)
|
||||
chatStore->>DbSvc: createMessageBranch(assistantMsg, userMsg.id)
|
||||
chatStore->>convStore: addMessageToActive(assistantMsg)
|
||||
|
||||
chatStore->>chatStore: streamChatCompletion(messages, assistantMsg)
|
||||
deactivate chatStore
|
||||
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
Note over UI,API: 🌊 STREAMING
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
activate chatStore
|
||||
chatStore->>chatStore: startStreaming()
|
||||
Note right of chatStore: isStreamingActive = true
|
||||
|
||||
chatStore->>chatStore: setActiveProcessingConversation(convId)
|
||||
chatStore->>chatStore: getOrCreateAbortController(convId)
|
||||
Note right of chatStore: abortControllers.set(convId, new AbortController())
|
||||
|
||||
chatStore->>chatStore: getApiOptions()
|
||||
Note right of chatStore: Merge from settingsStore.config:<br/>temperature, max_tokens, top_p, etc.
|
||||
|
||||
chatStore->>ChatSvc: sendMessage(messages, options, signal)
|
||||
activate ChatSvc
|
||||
|
||||
ChatSvc->>ChatSvc: convertMessageToChatData(messages)
|
||||
Note right of ChatSvc: DatabaseMessage[] → ApiChatMessageData[]<br/>Process attachments (images, PDFs, audio)
|
||||
|
||||
ChatSvc->>API: POST /v1/chat/completions
|
||||
Note right of API: {messages, model?, stream: true, ...params}
|
||||
|
||||
loop SSE chunks
|
||||
API-->>ChatSvc: data: {"choices":[{"delta":{...}}]}
|
||||
ChatSvc->>ChatSvc: parseSSEChunk(line)
|
||||
|
||||
alt content chunk
|
||||
ChatSvc-->>chatStore: onChunk(content)
|
||||
chatStore->>chatStore: setChatStreaming(convId, response, msgId)
|
||||
Note right of chatStore: currentResponse = $state(accumulated)
|
||||
chatStore->>convStore: updateMessageAtIndex(idx, {content})
|
||||
end
|
||||
|
||||
alt reasoning chunk
|
||||
ChatSvc-->>chatStore: onReasoningChunk(reasoning)
|
||||
chatStore->>convStore: updateMessageAtIndex(idx, {thinking})
|
||||
end
|
||||
|
||||
alt tool_calls chunk
|
||||
ChatSvc-->>chatStore: onToolCallChunk(toolCalls)
|
||||
chatStore->>convStore: updateMessageAtIndex(idx, {toolCalls})
|
||||
end
|
||||
|
||||
alt model info
|
||||
ChatSvc-->>chatStore: onModel(modelName)
|
||||
chatStore->>chatStore: recordModel(modelName)
|
||||
chatStore->>DbSvc: updateMessage(msgId, {model})
|
||||
end
|
||||
|
||||
alt timings (during stream)
|
||||
ChatSvc-->>chatStore: onTimings(timings, promptProgress)
|
||||
chatStore->>chatStore: updateProcessingStateFromTimings()
|
||||
end
|
||||
|
||||
chatStore-->>UI: reactive $state update
|
||||
end
|
||||
|
||||
API-->>ChatSvc: data: [DONE]
|
||||
ChatSvc-->>chatStore: onComplete(content, reasoning, timings, toolCalls)
|
||||
deactivate ChatSvc
|
||||
|
||||
chatStore->>chatStore: stopStreaming()
|
||||
chatStore->>DbSvc: updateMessage(msgId, {content, timings, model})
|
||||
chatStore->>convStore: updateCurrentNode(msgId)
|
||||
chatStore->>chatStore: setChatLoading(convId, false)
|
||||
chatStore->>chatStore: clearChatStreaming(convId)
|
||||
chatStore->>chatStore: clearProcessingState(convId)
|
||||
deactivate chatStore
|
||||
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
Note over UI,API: ⏹️ STOP GENERATION
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
UI->>chatStore: stopGeneration()
|
||||
activate chatStore
|
||||
chatStore->>chatStore: savePartialResponseIfNeeded(convId)
|
||||
Note right of chatStore: Save currentResponse to DB if non-empty
|
||||
chatStore->>chatStore: abortControllers.get(convId).abort()
|
||||
Note right of chatStore: fetch throws AbortError → caught by isAbortError()
|
||||
chatStore->>chatStore: stopStreaming()
|
||||
chatStore->>chatStore: setChatLoading(convId, false)
|
||||
chatStore->>chatStore: clearChatStreaming(convId)
|
||||
chatStore->>chatStore: clearProcessingState(convId)
|
||||
deactivate chatStore
|
||||
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
Note over UI,API: 🔁 REGENERATE
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
UI->>chatStore: regenerateMessageWithBranching(msgId, model?)
|
||||
activate chatStore
|
||||
chatStore->>convStore: findMessageIndex(msgId)
|
||||
chatStore->>chatStore: Get parent of target message
|
||||
chatStore->>chatStore: createAssistantMessage(parentId)
|
||||
chatStore->>DbSvc: createMessageBranch(newAssistantMsg, parentId)
|
||||
chatStore->>convStore: refreshActiveMessages()
|
||||
Note right of chatStore: Same streaming flow
|
||||
chatStore->>chatStore: streamChatCompletion(...)
|
||||
deactivate chatStore
|
||||
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
Note over UI,API: ➡️ CONTINUE
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
UI->>chatStore: continueAssistantMessage(msgId)
|
||||
activate chatStore
|
||||
chatStore->>chatStore: Get existing content from message
|
||||
chatStore->>chatStore: streamChatCompletion(..., existingContent)
|
||||
Note right of chatStore: Appends to existing message content
|
||||
deactivate chatStore
|
||||
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
Note over UI,API: ✏️ EDIT USER MESSAGE
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
UI->>chatStore: editUserMessagePreserveResponses(msgId, newContent)
|
||||
activate chatStore
|
||||
chatStore->>chatStore: Get parent of target message
|
||||
chatStore->>DbSvc: createMessageBranch(editedMsg, parentId)
|
||||
chatStore->>convStore: refreshActiveMessages()
|
||||
Note right of chatStore: Creates new branch, original preserved
|
||||
deactivate chatStore
|
||||
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
Note over UI,API: ❌ ERROR HANDLING
|
||||
%% ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
Note over chatStore: On stream error (non-abort):
|
||||
chatStore->>chatStore: showErrorDialog(type, message)
|
||||
Note right of chatStore: errorDialogState = {type: 'timeout'|'server', message}
|
||||
chatStore->>convStore: removeMessageAtIndex(failedMsgIdx)
|
||||
chatStore->>DbSvc: deleteMessage(failedMsgId)
|
||||
```
|
||||