shimmy/tests/api_error_handling_test.rs
Michael A. Kuykendall 0f0b95a8d4 fix: resolve all test failures from Issue #132 merge conflict
Issue #132 added a stop field to ChatCompletionRequest and a stop_tokens
field to GenOptions. The test code wasn't updated to match, causing the CI
PPT Contract Tests to fail.

Changes:
1. Added stop: None to 14 ChatCompletionRequest test initializers
   (see the sketch after this list):
   - src/openai_compat.rs (7 locations)
   - tests/openai_api_real_tests.rs (6 locations)
   - tests/api_error_handling_test.rs (1 location)

2. Added stop_tokens: Vec::new() to 2 GenOptions test initializers
   (also covered in the sketch after this list):
   - src/engine/universal.rs (1 location)
   - src/engine/huggingface.rs (1 location)

3. Fixed release.yml gate numbering inconsistency:
   - Changed GATE 6/8 -> GATE 6/7
   - Changed GATE 7/8 -> GATE 7/7
   (There are only 7 gates total, not 8)

4. Fixed workflow.rs to validate that requested output steps exist
   (sketched below, after the test summary):
   - Now checks that all requested outputs reference actual steps
   - Sets success=false when output steps are missing
   - Provides error message listing missing steps
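
For illustration, items 1 and 2 follow the same pattern: add the new field to
every struct literal that previously omitted it. A minimal sketch - the
ChatCompletionRequest fields match tests/api_error_handling_test.rs below,
while the GenOptions import path and its use of Default are assumptions:

    use shimmy::openai_compat::ChatCompletionRequest;
    use shimmy::engine::GenOptions; // import path assumed for illustration

    // Item 1: each test initializer gains the new `stop` field.
    let request = ChatCompletionRequest {
        model: "test-model".to_string(),
        messages: vec![],
        stream: Some(false),
        temperature: None,
        max_tokens: None,
        top_p: None,
        stop: None, // field added by Issue #132
    };

    // Item 2: each GenOptions initializer gains the new `stop_tokens` field.
    let opts = GenOptions {
        stop_tokens: Vec::new(), // field added by Issue #132
        ..Default::default()     // assumes GenOptions implements Default
    };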

All 445 tests now passing (3 ignored).
Release gate validation: PASS (10/10)
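
For reference, a minimal sketch of the validation added in item 4; the
function and parameter names are hypothetical, since the real workflow.rs
types aren't shown here:

    use std::collections::HashSet;

    // Every requested output must reference a step that actually exists.
    // On failure the caller sets success = false and reports the message.
    fn validate_output_steps(
        step_ids: &[String],
        requested_outputs: &[String],
    ) -> Result<(), String> {
        let known: HashSet<&str> = step_ids.iter().map(String::as_str).collect();
        let missing: Vec<&str> = requested_outputs
            .iter()
            .map(String::as_str)
            .filter(|id| !known.contains(*id))
            .collect();
        if missing.is_empty() {
            Ok(())
        } else {
            Err(format!(
                "outputs reference missing steps: {}",
                missing.join(", ")
            ))
        }
    }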

Signed-off-by: Michael A. Kuykendall <michaelallenkuykendall@gmail.com>
2025-10-22 16:39:44 -05:00

//! Regression test for Issue #65: better error handling for missing models.
//!
//! These tests ensure that when a user requests a non-existent model,
//! they receive a helpful JSON error response listing the available models
//! instead of just a bare 404 status code.

use serde_json::Value;

#[test]
fn test_model_not_found_error_format() {
    // Test that the error response structure is correct
    let error_response = serde_json::json!({
        "error": {
            "message": "Model 'nonexistent' not found. Available models: [\"model1\", \"model2\"]",
            "type": "invalid_request_error",
            "param": "model",
            "code": "model_not_found"
        }
    });

    assert!(error_response["error"]["message"].is_string());
    assert!(error_response["error"]["message"]
        .as_str()
        .unwrap()
        .contains("not found"));
    assert!(error_response["error"]["message"]
        .as_str()
        .unwrap()
        .contains("Available models"));
    assert_eq!(error_response["error"]["type"], "invalid_request_error");
    assert_eq!(error_response["error"]["param"], "model");
    assert_eq!(error_response["error"]["code"], "model_not_found");
}

#[test]
fn test_error_response_serialization() {
    // Test that error responses can be properly serialized/deserialized
    let error = serde_json::json!({
        "error": {
            "message": "Test error message",
            "type": "invalid_request_error",
            "param": "model",
            "code": "model_not_found"
        }
    });

    let serialized = serde_json::to_string(&error).unwrap();
    let deserialized: Value = serde_json::from_str(&serialized).unwrap();
    assert_eq!(error, deserialized);
}

#[tokio::test]
async fn test_chat_completions_model_not_found_response() {
    // Test the actual handler response for model not found
    use axum::{extract::State, Json};
    use shimmy::api::ChatMessage;
    use shimmy::openai_compat::{chat_completions, ChatCompletionRequest};
    use shimmy::{engine::adapter::InferenceEngineAdapter, model_registry::Registry, AppState};
    use std::sync::Arc;

    let registry = Registry::default(); // Empty registry
    let engine = Box::new(InferenceEngineAdapter::new());
    let state = Arc::new(AppState::new(engine, registry));

    let request = ChatCompletionRequest {
        model: "nonexistent-model".to_string(),
        messages: vec![ChatMessage {
            role: "user".to_string(),
            content: "Hello".to_string(),
        }],
        stream: Some(false),
        temperature: None,
        max_tokens: None,
        top_p: None,
        stop: None,
    };

    // Exercise the handler - it should return a 404 with a JSON error body.
    // We can't easily assert the exact status code here without response
    // introspection, but this exercises the improved error-handling path.
    let _response = chat_completions(State(state), Json(request)).await;
}

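// A possible tightening of the test above (hypothetical sketch, not part of
// the original file): assuming chat_completions is registered as an axum
// handler, its return type implements IntoResponse, so the status code could
// be asserted directly:
//
//     use axum::response::IntoResponse;
//
//     let response = chat_completions(State(state), Json(request))
//         .await
//         .into_response();
//     assert_eq!(response.status(), axum::http::StatusCode::NOT_FOUND);
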
#[test]
fn test_available_models_inclusion_in_error() {
    // Test that available models are properly included in error messages
    let available_models = vec![
        "model1".to_string(),
        "model2".to_string(),
        "model3".to_string(),
    ];
    let requested_model = "nonexistent";
    let error_message = format!(
        "Model '{}' not found. Available models: {:?}",
        requested_model, available_models
    );

    assert!(error_message.contains("Model 'nonexistent' not found"));
    assert!(error_message.contains("Available models:"));
    assert!(error_message.contains("model1"));
    assert!(error_message.contains("model2"));
    assert!(error_message.contains("model3"));
}

#[test]
fn test_openai_compatible_error_structure() {
    // Ensure the error structure matches the OpenAI API format.
    // This prevents breaking changes to the error response format.
    let error_response = serde_json::json!({
        "error": {
            "message": "Model 'test' not found. Available models: []",
            "type": "invalid_request_error",
            "param": "model",
            "code": "model_not_found"
        }
    });

    // Required fields for OpenAI compatibility
    assert!(error_response.get("error").is_some());
    let error = &error_response["error"];
    assert!(error.get("message").is_some());
    assert!(error.get("type").is_some());
    assert!(error.get("param").is_some());
    assert!(error.get("code").is_some());

    // Field types
    assert!(error["message"].is_string());
    assert!(error["type"].is_string());
    assert!(error["param"].is_string());
    assert!(error["code"].is_string());
}