refactored app startup flow
This commit is contained in:
111
src-tauri/src/init/lifecycle.rs
Normal file
111
src-tauri/src/init/lifecycle.rs
Normal file
@@ -0,0 +1,111 @@
|
||||
use reqwest::StatusCode;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
use tracing::{info, warn};
|
||||
|
||||
use crate::{
|
||||
init::startup::{initialize_app_data_and_connections, transition_to_main_interface},
|
||||
lock_w,
|
||||
models::health::HealthError,
|
||||
remotes::health::HealthRemote,
|
||||
services::{
|
||||
active_app::init_active_app_changes_listener,
|
||||
auth::get_tokens,
|
||||
health_manager::show_health_manager_with_error,
|
||||
scene::close_splash_window,
|
||||
welcome::open_welcome_window,
|
||||
},
|
||||
state::FDOLL,
|
||||
system_tray::{init_system_tray, update_system_tray},
|
||||
};
|
||||
|
||||
/// Initializes and starts the core app lifecycle after initial setup.
|
||||
///
|
||||
/// This function handles:
|
||||
/// - System tray initialization and storage in app state
|
||||
/// - Active app change listener setup
|
||||
/// - Startup sequence execution with error handling
|
||||
///
|
||||
/// # Errors
|
||||
/// If the startup sequence fails, displays a health manager dialog
|
||||
/// with the error details.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// // Called automatically during app setup in initialize_app_environment()
|
||||
/// lifecycle::launch_core_services().await;
|
||||
/// ```
|
||||
pub async fn launch_core_services() {
|
||||
let tray = init_system_tray();
|
||||
{
|
||||
let mut guard = lock_w!(FDOLL);
|
||||
guard.tray = Some(tray);
|
||||
}
|
||||
|
||||
// Begin listening for foreground app changes
|
||||
init_active_app_changes_listener();
|
||||
|
||||
if let Err(err) = validate_environment_and_start_app().await {
|
||||
tracing::warn!("Startup sequence encountered an error: {}", err);
|
||||
show_health_manager_with_error(Some(err.to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform checks for environment, network condition
|
||||
/// and handle situations where startup would not be appropriate.
|
||||
pub async fn validate_environment_and_start_app() -> Result<(), HealthError> {
|
||||
let health_remote = HealthRemote::try_new()?;
|
||||
|
||||
// simple retry loop to smooth transient network issues
|
||||
const MAX_ATTEMPTS: u8 = 3;
|
||||
const BACKOFF_MS: u64 = 500;
|
||||
|
||||
for attempt in 1..=MAX_ATTEMPTS {
|
||||
match health_remote.get_health().await {
|
||||
Ok(_) => {
|
||||
handle_authentication_flow().await;
|
||||
return Ok(());
|
||||
}
|
||||
Err(HealthError::NonOkStatus(status)) => {
|
||||
warn!(attempt, "server health reported non-OK status: {status}");
|
||||
return Err(HealthError::NonOkStatus(status));
|
||||
}
|
||||
Err(HealthError::UnexpectedStatus(status)) => {
|
||||
warn!(attempt, "server health check failed with status: {status}");
|
||||
return Err(HealthError::UnexpectedStatus(status));
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(attempt, "server health check failed: {err}");
|
||||
if attempt == MAX_ATTEMPTS {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if attempt < MAX_ATTEMPTS {
|
||||
sleep(Duration::from_millis(BACKOFF_MS)).await;
|
||||
}
|
||||
}
|
||||
|
||||
Err(HealthError::UnexpectedStatus(
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
))
|
||||
}
|
||||
|
||||
/// Handles authentication flow: checks for tokens and either restores session or shows welcome.
|
||||
pub async fn handle_authentication_flow() {
|
||||
match get_tokens().await {
|
||||
Some(_tokens) => {
|
||||
info!("Tokens found in keyring - restoring user session");
|
||||
let start = initialize_app_data_and_connections().await;
|
||||
transition_to_main_interface(start).await;
|
||||
update_system_tray(true);
|
||||
}
|
||||
None => {
|
||||
info!("No active session found - showing welcome first");
|
||||
open_welcome_window();
|
||||
close_splash_window();
|
||||
update_system_tray(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
3
src-tauri/src/init/mod.rs
Normal file
3
src-tauri/src/init/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
// Submodules of the `init` subsystem.
pub mod lifecycle; // post-setup lifecycle: tray, listeners, startup sequence
pub mod startup; // splash/scene transitions and WebSocket/app-data bootstrap
pub mod tracing; // file + console logging initialization
|
||||
52
src-tauri/src/init/startup.rs
Normal file
52
src-tauri/src/init/startup.rs
Normal file
@@ -0,0 +1,52 @@
|
||||
use std::time::Duration;
|
||||
use tokio::time::{sleep, Instant};
|
||||
|
||||
use crate::{
|
||||
services::{
|
||||
auth::get_access_token,
|
||||
scene::{close_splash_window, open_scene_window, open_splash_window},
|
||||
ws::init_ws_client,
|
||||
},
|
||||
state::init_app_data,
|
||||
};
|
||||
|
||||
async fn establish_websocket_connection() {
|
||||
const MAX_ATTEMPTS: u8 = 5;
|
||||
const BACKOFF: Duration = Duration::from_millis(300);
|
||||
|
||||
for _attempt in 1..=MAX_ATTEMPTS {
|
||||
if get_access_token().await.is_some() {
|
||||
init_ws_client().await;
|
||||
return;
|
||||
}
|
||||
|
||||
sleep(BACKOFF).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn initialize_app_data_and_connections() -> Instant {
|
||||
open_splash_window();
|
||||
|
||||
// Record start time for minimum splash duration
|
||||
let start = Instant::now();
|
||||
|
||||
// Initialize app data first so we only start WebSocket after auth is fully available
|
||||
init_app_data().await;
|
||||
|
||||
// Initialize WebSocket client after we know auth is present
|
||||
establish_websocket_connection().await;
|
||||
|
||||
start
|
||||
}
|
||||
|
||||
pub async fn transition_to_main_interface(start: Instant) {
|
||||
// Ensure splash stays visible for at least 3 seconds
|
||||
let elapsed = start.elapsed();
|
||||
if elapsed < Duration::from_secs(3) {
|
||||
sleep(Duration::from_secs(3) - elapsed).await;
|
||||
}
|
||||
|
||||
// Close splash and open main scene
|
||||
close_splash_window();
|
||||
open_scene_window();
|
||||
}
|
||||
49
src-tauri/src/init/tracing.rs
Normal file
49
src-tauri/src/init/tracing.rs
Normal file
@@ -0,0 +1,49 @@
|
||||
use tauri::Manager;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
|
||||
use crate::get_app_handle;
|
||||
|
||||
/// Initialize `tracing_subscriber` for logging to file & console
|
||||
pub fn setup_logging() {
|
||||
// Set up file appender
|
||||
let app_handle = get_app_handle();
|
||||
let app_log_dir = app_handle
|
||||
.path()
|
||||
.app_log_dir()
|
||||
.expect("Could not determine app log dir");
|
||||
|
||||
// Create the directory if it doesn't exist
|
||||
if let Err(e) = std::fs::create_dir_all(&app_log_dir) {
|
||||
eprintln!("Failed to create log directory: {}", e);
|
||||
}
|
||||
|
||||
let file_appender = tracing_appender::rolling::daily(&app_log_dir, "friendolls.log");
|
||||
let (non_blocking, _guard) = tracing_appender::non_blocking(file_appender);
|
||||
|
||||
// Create a filter - adjust the level as needed (trace, debug, info, warn, error)
|
||||
let filter = tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info"));
|
||||
|
||||
// Create a layer that writes to the file
|
||||
let file_layer = tracing_subscriber::fmt::layer()
|
||||
.with_target(false)
|
||||
.with_thread_ids(false)
|
||||
.with_file(true)
|
||||
.with_line_number(true)
|
||||
.with_writer(non_blocking);
|
||||
|
||||
// Create a layer that writes to stdout (console)
|
||||
let console_layer = tracing_subscriber::fmt::layer()
|
||||
.with_target(false)
|
||||
.with_thread_ids(false)
|
||||
.with_file(true)
|
||||
.with_line_number(true);
|
||||
|
||||
// Combine both layers with filter
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
tracing_subscriber::registry()
|
||||
.with(filter)
|
||||
.with(file_layer)
|
||||
.with(console_layer)
|
||||
.init();
|
||||
}
|
||||
Reference in New Issue
Block a user