diff --git a/src/app.rs b/src/app.rs index d2080d5c..9d746b0b 100644 --- a/src/app.rs +++ b/src/app.rs @@ -2,9 +2,14 @@ //! //! See `handle_startup()` for the first code that runs on app startup. -use std::{cell::RefCell, collections::HashMap}; +use std::{ + cell::RefCell, + collections::{hash_map::DefaultHasher, HashMap}, + hash::{Hash, Hasher}, + time::Duration, +}; use makepad_widgets::*; -use matrix_sdk::{RoomState, ruma::{OwnedEventId, OwnedRoomId, RoomId}}; +use matrix_sdk::{RoomState, ruma::{OwnedEventId, OwnedRoomId, OwnedUserId, RoomId}}; use serde::{Deserialize, Serialize}; use crate::{ avatar_cache::clear_avatar_cache, room_preview_cache::clear_room_preview_cache, home::{ @@ -164,6 +169,7 @@ pub struct App { #[live] ui: WidgetRef, /// The top-level app state, shared across various parts of the app. #[rust] app_state: AppState, + #[rust] lifecycle: AppLifecycle, /// The details of a room we're waiting on to be loaded so that we can navigate to it. /// This can be either a room we're waiting to join, or one we're waiting to be invited to. /// Also includes an optional room ID to be closed once the awaited room has been loaded. 
@@ -569,6 +575,44 @@ fn clear_all_app_state(cx: &mut Cx) { clear_room_preview_cache(cx); } +#[derive(Debug)] +struct AppLifecycle { + is_foreground: bool, + is_active: bool, + last_app_state_save: Option<AppStateSaveFingerprint>, + shutdown_started: bool, +} + +impl Default for AppLifecycle { + fn default() -> Self { + Self { + is_foreground: true, + is_active: true, + last_app_state_save: None, + shutdown_started: false, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +struct AppStateSaveFingerprint { + user_id: OwnedUserId, + hash: u64, + len: usize, +} + +impl AppStateSaveFingerprint { + fn new(user_id: OwnedUserId, bytes: &[u8]) -> Self { + let mut hasher = DefaultHasher::new(); + bytes.hash(&mut hasher); + Self { + user_id, + hash: hasher.finish(), + len: bytes.len(), + } + } +} + impl AppMain for App { fn script_mod(vm: &mut ScriptVm) -> makepad_widgets::ScriptValue { // Order matters: base widgets first, then app widgets, then app UI. @@ -616,37 +660,7 @@ impl AppMain for App { self.app_state.app_prefs.broadcast_all(cx); } - if let Event::Shutdown = event { - let window_ref = self.ui.window(cx, ids!(main_window)); - if let Err(e) = persistence::save_window_state(window_ref, cx) { - error!("Failed to save window state. Error: {e}"); - } - if let Some(user_id) = current_user_id() { - let app_state = self.app_state.clone(); - if let Err(e) = persistence::save_app_state(app_state, user_id) { - error!("Failed to save app state. Error: {e}"); - } - } - #[cfg(feature = "tsp")] { - // Save the TSP wallet state, if it exists, with a 3-second timeout. - let tsp_state = std::mem::take(&mut *crate::tsp::tsp_state_ref().lock().unwrap()); - let res = crate::sliding_sync::block_on_async_with_timeout( - Some(std::time::Duration::from_secs(3)), - async move { - match tsp_state.close_and_serialize().await { - Ok(saved_state) => match persistence::save_tsp_state_async(saved_state).await { - Ok(_) => { } - Err(e) => error!("Failed to save TSP wallet state. 
Error: {e}"), - } - Err(e) => error!("Failed to close and serialize TSP wallet state. Error: {e}"), - } - }, - ); - if let Err(_e) = res { - error!("Failed to save TSP wallet state before app shutdown. Error: Timed Out."); - } - } - } + self.handle_lifecycle_event(cx, event); // Forward events to the MatchEvent trait implementation. self.match_event(cx, event); @@ -657,6 +671,110 @@ impl AppMain for App { } impl App { + fn handle_lifecycle_event(&mut self, cx: &mut Cx, event: &Event) { + match event { + Event::QuitRequested(e) => { + log!("Received quit request: {:?}. Persisting state before allowing quit.", e.reason); + self.persist_runtime_state(cx, "quit request"); + } + Event::Pause => { + if self.lifecycle.is_active { + log!("App paused; persisting runtime state."); + self.lifecycle.is_active = false; + } + self.persist_runtime_state(cx, "pause"); + } + Event::Resume => { + if !self.lifecycle.is_active { + log!("App resumed."); + self.lifecycle.is_active = true; + } + crate::sliding_sync::set_sync_service_desired_running(true, "app resume"); + } + Event::Background => { + if self.lifecycle.is_foreground { + log!("App entered background; persisting state and stopping Matrix sync."); + self.lifecycle.is_foreground = false; + } + self.persist_runtime_state(cx, "background"); + crate::sliding_sync::set_sync_service_desired_running(false, "app background"); + } + Event::Foreground => { + if !self.lifecycle.is_foreground { + log!("App entered foreground; starting Matrix sync."); + self.lifecycle.is_foreground = true; + } + crate::sliding_sync::set_sync_service_desired_running(true, "app foreground"); + } + Event::Shutdown => self.handle_shutdown(cx), + _ => {} + } + } + + fn persist_runtime_state(&mut self, cx: &mut Cx, reason: &'static str) { + let window_ref = self.ui.window(cx, ids!(main_window)); + if let Err(e) = persistence::save_window_state(window_ref, cx) { + error!("Failed to save window state during {reason}. 
Error: {e}"); + } + + let Some(user_id) = current_user_id() else { + log!("Skipping app state persistence during {reason}: no logged-in Matrix user."); + return; + }; + + let app_state_json = match persistence::serialize_app_state(&self.app_state) { + Ok(bytes) => bytes, + Err(e) => { + error!("Failed to serialize app state during {reason}. Error: {e}"); + return; + } + }; + let fingerprint = AppStateSaveFingerprint::new(user_id.clone(), &app_state_json); + if self.lifecycle.last_app_state_save.as_ref() == Some(&fingerprint) { + log!("Skipping app state persistence during {reason}: state is unchanged."); + return; + } + + if let Err(e) = persistence::save_app_state_bytes(&app_state_json, &user_id) { + error!("Failed to save app state during {reason}. Error: {e}"); + } else { + self.lifecycle.last_app_state_save = Some(fingerprint); + } + } + + fn handle_shutdown(&mut self, cx: &mut Cx) { + if self.lifecycle.shutdown_started { + log!("Ignoring duplicate shutdown lifecycle event."); + return; + } + self.lifecycle.shutdown_started = true; + + self.persist_runtime_state(cx, "shutdown"); + + if let Err(_e) = crate::sliding_sync::stop_sync_service_for_shutdown(Duration::from_secs(3)) { + error!("Failed to stop Matrix sync service before shutdown. Error: Timed out."); + } + + #[cfg(feature = "tsp")] { + let tsp_state = std::mem::take(&mut *crate::tsp::tsp_state_ref().lock().unwrap()); + let res = crate::sliding_sync::block_on_async_with_timeout( + Some(Duration::from_secs(3)), + async move { + match tsp_state.close_and_serialize().await { + Ok(saved_state) => match persistence::save_tsp_state_async(saved_state).await { + Ok(_) => { } + Err(e) => error!("Failed to save TSP wallet state. Error: {e}"), + } + Err(e) => error!("Failed to close and serialize TSP wallet state. Error: {e}"), + } + }, + ); + if let Err(_e) = res { + error!("Failed to save TSP wallet state before app shutdown. 
Error: Timed Out."); + } + } + fn update_login_visibility(&self, cx: &mut Cx) { let show_login = !self.app_state.logged_in; if !show_login { diff --git a/src/persistence/app_state.rs b/src/persistence/app_state.rs index 6bc88714..fadde744 100644 --- a/src/persistence/app_state.rs +++ b/src/persistence/app_state.rs @@ -5,12 +5,10 @@ use serde::{self, Deserialize, Serialize}; use matrix_sdk::ruma::{OwnedUserId, UserId}; use crate::{app::AppState, app_data_dir, persistence::persistent_state_dir}; - const LATEST_APP_STATE_FILE_NAME: &str = "latest_app_state.json"; const WINDOW_GEOM_STATE_FILE_NAME: &str = "window_geom_state.json"; - /// Persistable state of the window's size, position, and fullscreen status. #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct WindowGeomState { @@ -22,17 +20,24 @@ pub struct WindowGeomState { pub is_fullscreen: bool, } - /// Save the current app state to persistent storage. -pub fn save_app_state( - app_state: AppState, - user_id: OwnedUserId, -) -> anyhow::Result<()> { - let file = std::fs::File::create( - persistent_state_dir(&user_id).join(LATEST_APP_STATE_FILE_NAME) - )?; +pub fn save_app_state(app_state: AppState, user_id: OwnedUserId) -> anyhow::Result<()> { + let bytes = serialize_app_state(&app_state)?; + save_app_state_bytes(&bytes, &user_id) +} + +/// Serializes the current app state into the same format used by [`save_app_state`]. +pub fn serialize_app_state(app_state: &AppState) -> anyhow::Result<Vec<u8>> { + Ok(serde_json::to_vec(app_state)?) +} + +/// Save pre-serialized app state bytes to persistent storage. 
+pub fn save_app_state_bytes(app_state_json: &[u8], user_id: &UserId) -> anyhow::Result<()> { + let state_dir = persistent_state_dir(user_id); + std::fs::create_dir_all(&state_dir)?; + let file = std::fs::File::create(state_dir.join(LATEST_APP_STATE_FILE_NAME))?; + let mut writer = std::io::BufWriter::new(file); - serde_json::to_writer(&mut writer, &app_state)?; + writer.write_all(app_state_json)?; writer.flush()?; log!("Successfully saved app state to persistent storage."); Ok(()) @@ -67,7 +72,7 @@ pub async fn load_app_state(user_id: &UserId) -> anyhow::Result<AppState> { log!("No saved app state found, using default."); return Ok(AppState::default()); } - Err(e) => return Err(e.into()) + Err(e) => return Err(e.into()), }; match serde_json::from_slice(&file_bytes) { Ok(app_state) => { @@ -75,7 +80,9 @@ pub async fn load_app_state(user_id: &UserId) -> anyhow::Result<AppState> { Ok(app_state) } Err(e) => { - error!("Failed to deserialize app state: {e}. This may be due to an incompatible format from a previous version."); + error!( + "Failed to deserialize app state: {e}. This may be due to an incompatible format from a previous version." + ); // Backup the old file to preserve user's data let backup_path = state_path.with_extension("json.bak"); diff --git a/src/sliding_sync.rs b/src/sliding_sync.rs index 2a47ae2d..6a3d4d4d 100644 --- a/src/sliding_sync.rs +++ b/src/sliding_sync.rs @@ -2120,6 +2120,10 @@ pub fn current_user_id() -> Option<OwnedUserId> { /// The singleton sync service. static SYNC_SERVICE: Mutex<Option<Arc<SyncService>>> = Mutex::new(None); +static SYNC_SERVICE_DESIRED_RUNNING: AtomicBool = AtomicBool::new(true); +static SYNC_SERVICE_ASSUMED_RUNNING: AtomicBool = AtomicBool::new(false); +static SYNC_SERVICE_LIFECYCLE_LOCK: LazyLock<tokio::sync::Mutex<()>> = + LazyLock::new(|| tokio::sync::Mutex::new(())); /// Set to `true` when the access token has been rejected by the homeserver, /// signaling the main task to tear down the current session and wait for re-login. 
@@ -2133,6 +2137,73 @@ pub fn get_sync_service() -> Option<Arc<SyncService>> { SYNC_SERVICE.lock().ok()?.as_ref().cloned() } +pub fn sync_service_desired_running() -> bool { + SYNC_SERVICE_DESIRED_RUNNING.load(Ordering::Acquire) +} + +pub fn set_sync_service_desired_running(running: bool, reason: &'static str) { + let previous = SYNC_SERVICE_DESIRED_RUNNING.swap(running, Ordering::AcqRel); + if previous == running && SYNC_SERVICE_ASSUMED_RUNNING.load(Ordering::Acquire) == running { + log!( + "Matrix sync service already desired {}; skipping lifecycle request ({reason}).", + if running { "running" } else { "stopped" } + ); + return; + } + + let rt_handle = TOKIO_RUNTIME.lock().unwrap().as_ref().map(|rt| rt.handle().clone()); + let Some(rt_handle) = rt_handle else { + log!( + "Stored Matrix sync desired state as {}; Tokio runtime is not running yet ({reason}).", + if running { "running" } else { "stopped" } + ); + return; + }; + + rt_handle.spawn(apply_sync_service_desired_state(reason)); +} + +async fn apply_sync_service_desired_state(reason: &'static str) { + let _guard = SYNC_SERVICE_LIFECYCLE_LOCK.lock().await; + loop { + let desired = SYNC_SERVICE_DESIRED_RUNNING.load(Ordering::Acquire); + if SYNC_SERVICE_ASSUMED_RUNNING.load(Ordering::Acquire) == desired { + break; + } + + let Some(sync_service) = get_sync_service() else { + log!("Matrix sync service is not available while applying lifecycle request ({reason})."); + break; + }; + + if desired { + log!("Starting Matrix sync service after lifecycle request ({reason})."); + sync_service.start().await; + } else { + log!("Stopping Matrix sync service after lifecycle request ({reason})."); + sync_service.stop().await; + } + SYNC_SERVICE_ASSUMED_RUNNING.store(desired, Ordering::Release); + } +} + +pub fn stop_sync_service_for_shutdown(timeout: Duration) -> Result<(), Elapsed> { + SYNC_SERVICE_DESIRED_RUNNING.store(false, Ordering::Release); + let Some(sync_service) = get_sync_service() else { + 
SYNC_SERVICE_ASSUMED_RUNNING.store(false, Ordering::Release); + return Ok(()); + }; + + let result = block_on_async_with_timeout(Some(timeout), async move { + let _guard = SYNC_SERVICE_LIFECYCLE_LOCK.lock().await; + sync_service.stop().await; + }); + if result.is_ok() { + SYNC_SERVICE_ASSUMED_RUNNING.store(false, Ordering::Release); + } + result +} + /// The list of users that the current user has chosen to ignore. /// Ideally we shouldn't have to maintain this list ourselves, /// but the Matrix SDK doesn't currently properly maintain the list of ignored users. @@ -2453,13 +2524,14 @@ async fn start_matrix_client_login_and_sync(rt: Handle) { handle_load_app_state(logged_in_user_id.to_owned()); handle_sync_indicator_subscriber(&sync_service); handle_sync_service_state_subscriber(sync_service.state()); - sync_service.start().await; let room_list_service = sync_service.room_list_service(); + let sync_service = Arc::new(sync_service); - if let Some(_existing) = SYNC_SERVICE.lock().unwrap().replace(Arc::new(sync_service)) { + if let Some(_existing) = SYNC_SERVICE.lock().unwrap().replace(sync_service) { error!("BUG: unexpectedly replaced an existing sync service when initializing the matrix client."); } + apply_sync_service_desired_state("initial Matrix sync startup").await; let mut room_list_service_task = rt.spawn(room_list_service_loop(room_list_service)); let mut space_service_task = rt.spawn(space_service_loop(client)); @@ -2565,6 +2637,7 @@ async fn start_matrix_client_login_and_sync(rt: Handle) { // Clear the stored client and sync service so re-login starts fresh. let _ = CLIENT.lock().unwrap().take(); let _ = SYNC_SERVICE.lock().unwrap().take(); + SYNC_SERVICE_ASSUMED_RUNNING.store(false, Ordering::Release); continue 'login_loop; } // For non-token-related breaks (logout, fatal errors), exit the function. 
@@ -3327,12 +3400,18 @@ fn handle_sync_service_state_subscriber(mut subscriber: Subscriber Result<()> { // This prevents memory leaks when users logout and login again without closing the app CLIENT.lock().unwrap().take(); SYNC_SERVICE.lock().unwrap().take(); + SYNC_SERVICE_ASSUMED_RUNNING.store(false, Ordering::Release); REQUEST_SENDER.lock().unwrap().take(); IGNORED_USERS.lock().unwrap().clear(); ALL_JOINED_ROOMS.lock().unwrap().clear();