// rustGPT/tests/tui_performance_tests.rs
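//
// Coarse wall-clock performance tests for the TUI layer: the `Display`
// rendering helpers and the `InputHandler` lifecycle. Each test repeats an
// operation a fixed number of times and asserts the total elapsed time stays
// under a generous upper bound, so regressions surface as test failures.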

use gpt_cli_rust::utils::{Display, InputHandler};
use std::time::{Duration, Instant};
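
// The timing pattern used throughout this file (run an operation N times,
// measure, assert an upper bound) could be factored into a helper like the
// sketch below. It is illustrative only and not yet used by the tests; the
// name `assert_completes_within` is hypothetical, not part of the crate.
//
// Example usage, equivalent to `test_display_creation_performance` below:
//     assert_completes_within(|| { let _ = Display::new(); }, 100,
//         Duration::from_millis(500), "Display creation");
#[allow(dead_code)]
fn assert_completes_within<F: FnMut()>(mut op: F, iterations: usize, limit: Duration, label: &str) {
    let start = Instant::now();
    for _ in 0..iterations {
        op();
    }
    let duration = start.elapsed();
    assert!(duration < limit, "{} took too long: {:?}", label, duration);
}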

#[test]
fn test_display_performance() {
    let display = Display::new();

    // Test performance of basic operations
    let start = Instant::now();
    for _ in 0..100 {
        display.print_info("Test message");
        display.print_error("Test error");
        display.print_warning("Test warning");
    }
    let duration = start.elapsed();

    // Should complete in reasonable time (less than 1 second for 300 operations)
    assert!(duration < Duration::from_secs(1),
        "Display operations took too long: {:?}", duration);
}

#[test]
fn test_public_display_methods_performance() {
    let display = Display::new();

    let start = Instant::now();
    for i in 0..50 {
        display.print_header();
        display.print_help();
        display.print_status_bar("GPT-4", "OpenAI", "test", &[("feature", true), ("another", false)]);
        display.print_error_with_context("Test error", Some("Context"), &["Fix it"]);
        display.print_section_header("Test Section", "🔧");
        display.print_feature_status(&[("Feature 1", true, Some("Desc")), ("Feature 2", false, None)]);
        display.clear_current_line();
        display.print_separator("-");
        display.print_progress_bar(i, 100, "Progress");
    }
    let duration = start.elapsed();

    // Should handle complex display operations efficiently
    assert!(duration < Duration::from_secs(2),
        "Complex display operations took too long: {:?}", duration);
}

#[test]
fn test_assistant_response_performance() {
    let display = Display::new();

    // Test with moderately sized content that includes inline code and a code block
    let content = "This is a test with `inline code` and some other text.\n\n```rust\nfn test() {\n println!(\"Hello\");\n}\n```\n\nMore text here.".repeat(5);

    let start = Instant::now();
    for _ in 0..10 {
        display.print_assistant_response(&content);
    }
    let duration = start.elapsed();

    // Should handle moderately sized content efficiently
    assert!(duration < Duration::from_secs(3),
        "Assistant response formatting took too long: {:?}", duration);
}

#[test]
fn test_conversation_history_performance() {
    use gpt_cli_rust::core::Message;

    let display = Display::new();

    // Create test messages (need to store them separately to avoid borrow issues)
    let mut message_store = Vec::new();
    for i in 0..20 {
        let message = Message {
            role: if i % 2 == 0 { "user" } else { "assistant" }.to_string(),
            content: format!("This is test message number {} with some content.", i),
        };
        message_store.push(message);
    }

    // Create references after all messages are created
    let messages: Vec<(usize, &Message)> = message_store
        .iter()
        .enumerate()
        .map(|(i, msg)| (i + 1, msg))
        .collect();

    let start = Instant::now();
    display.print_conversation_history(&messages);
    let duration = start.elapsed();

    // Should handle conversation history efficiently
    assert!(duration < Duration::from_secs(1),
        "Conversation history display took too long: {:?}", duration);
}

#[test]
fn test_input_handler_creation_performance() {
    let start = Instant::now();
    for _ in 0..10 {
        let _handler = InputHandler::default();
    }
    let duration = start.elapsed();

    // Input handler creation should be fast
    assert!(duration < Duration::from_millis(500),
        "Input handler creation took too long: {:?}", duration);
}

#[test]
fn test_select_from_list_empty_performance() {
    let handler = InputHandler::default();
    let empty_items: Vec<String> = vec![];

    let start = Instant::now();
    for _ in 0..100 {
        let _ = handler.select_from_list("Test", &empty_items, None);
    }
    let duration = start.elapsed();

    // Empty list handling should be very fast
    assert!(duration < Duration::from_millis(100),
        "Empty list handling took too long: {:?}", duration);
}

#[test]
fn test_display_creation_performance() {
    let start = Instant::now();
    for _ in 0..100 {
        let _display = Display::new();
    }
    let duration = start.elapsed();

    // Display creation should be efficient
    assert!(duration < Duration::from_millis(500),
        "Display creation took too long: {:?}", duration);
}

#[test]
fn test_spinner_performance() {
    let display = Display::new();

    let start = Instant::now();
    for i in 0..50 {
        let spinner = display.show_spinner(&format!("Operation {}", i));
        spinner.finish("Completed");

        let spinner2 = display.show_spinner(&format!("Operation {}", i));
        spinner2.finish_with_error("Failed");
    }
    let duration = start.elapsed();

    // Spinner operations should be fast
    assert!(duration < Duration::from_secs(1),
        "Spinner operations took too long: {:?}", duration);
}

#[test]
fn test_large_feature_status_performance() {
    let display = Display::new();

    // Create many features with static strings to avoid lifetime issues
    let features: Vec<(&str, bool, Option<&str>)> = (0..10)
        .map(|i| (
            "Feature Name",
            i % 2 == 0,
            if i % 3 == 0 { Some("Feature description") } else { None },
        ))
        .collect();

    let start = Instant::now();
    for _ in 0..10 {
        display.print_feature_status(&features);
    }
    let duration = start.elapsed();

    // Should handle many features efficiently
    assert!(duration < Duration::from_secs(1),
        "Large feature status display took too long: {:?}", duration);
}

#[test]
fn test_memory_usage_stability() {
    let display = Display::new();

    // Test that repeated operations don't cause memory leaks.
    // This is a basic test - in a real scenario you'd use a memory profiler.
    for i in 0..100 {
        display.print_status_bar("GPT-4", "OpenAI", "test", &[("feature", true)]);
        display.print_error_with_context("Error", Some("Context"), &["Suggestion"]);
        display.print_section_header(&format!("Section {}", i), "🔧");

        let spinner = display.show_spinner("Test");
        spinner.finish("Done");
    }

    // Reaching this point without a panic or crash is the success condition,
    // so no explicit assertion is needed.
}
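
// The memory-stability test above only verifies that nothing panics. One way
// to get a real signal, sketched here purely as an illustration, is a counting
// allocator that wraps the system allocator and tracks live bytes. It is
// defined but deliberately not installed, because registering it with
// `#[global_allocator]` would affect the entire test binary:
//
//     #[global_allocator]
//     static GLOBAL: CountingAllocator = CountingAllocator;
//
// With it installed, a test could snapshot `LIVE_BYTES` before and after the
// loop and assert the difference stays within an expected bound.
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

#[allow(dead_code)]
struct CountingAllocator;

static LIVE_BYTES: AtomicUsize = AtomicUsize::new(0);

unsafe impl GlobalAlloc for CountingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        LIVE_BYTES.fetch_add(layout.size(), Ordering::Relaxed);
        unsafe { System.alloc(layout) }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        LIVE_BYTES.fetch_sub(layout.size(), Ordering::Relaxed);
        unsafe { System.dealloc(ptr, layout) }
    }
}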

#[test]
fn test_input_cleanup_performance() {
    let start = Instant::now();
    for _ in 0..50 {
        let mut handler = InputHandler::default();
        let _ = handler.cleanup();
        let _ = handler.save_history();
    }
    let duration = start.elapsed();

    // Cleanup operations should be efficient
    assert!(duration < Duration::from_secs(1),
        "Input cleanup took too long: {:?}", duration);
}