Add more safeguards to prevent RUNNINGBOARD 0xdead10cc crashes

This commit adds more safeguards to prevent RUNNINGBOARD 0xdead10cc
crashes, by:
1. Using `beginBackgroundTask(withName:expirationHandler:)` to request
   additional background execution time before the app is fully
   suspended (see the minimal sketch after this list).
   See https://developer.apple.com/documentation/xcode/sigkill
2. Reorganizing app closing/cleanup tasks to run in parallel where
   possible, to reduce the time needed to clean up resources.
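
For orientation, here is a minimal, self-contained sketch of the combined pattern. `AppResources`, `cancelStreamingTasks()`, `stopProfileUpdates()`, and `closeDatabase()` are hypothetical stand-ins for `damus_state`, the NostrNetworkManager, and ndb; only `beginBackgroundTask(withName:expirationHandler:)`, `endBackgroundTask(_:)`, and `withTaskGroup` are the real system APIs this commit relies on:

import UIKit

// Hypothetical stand-in for the app's resources (network manager, database).
final class AppResources: Sendable {
    func cancelStreamingTasks() async { /* cancel streaming subscriptions */ }
    func stopProfileUpdates() async { /* stop profile listener tasks */ }
    func closeDatabase() { /* close the on-disk database */ }

    /// Run independent shutdown steps concurrently, then wait for all of them.
    func closeNetworkTasks() async {
        await withTaskGroup(of: Void.self) { group in
            group.addTask { await self.cancelStreamingTasks() }
            group.addTask { await self.stopProfileUpdates() }
            await group.waitForAll() // don't return until every step has finished
        }
    }
}

/// Called when the scene phase changes to `.background`.
@MainActor
func handleBackgrounding(resources: AppResources) {
    // 1. Ask the system for extra execution time before it suspends the app.
    let bgTask = UIApplication.shared.beginBackgroundTask(withName: "Graceful shutdown") {
        // Expiration handler: background time is about to run out, so close
        // the database immediately rather than risk a 0xdead10cc termination.
        // (A complete implementation would also call endBackgroundTask here.)
        resources.closeDatabase()
    }

    // 2. Do the orderly cleanup.
    Task {
        await resources.closeNetworkTasks() // stop everything that still reads the database
        resources.closeDatabase()           // then close the database itself
        // 3. Tell the system we are done so it can suspend the app safely.
        UIApplication.shared.endBackgroundTask(bgTask)
    }
}

The ordering mirrors the diffs below: network and subscription tasks are torn down (in parallel) before the database is closed, and the background task is ended only once cleanup has finished.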

Signed-off-by: Daniel D’Aquino <daniel@daquino.me>
Daniel D’Aquino committed on 2025-09-29 16:39:14 -07:00
parent fe62aea08a
commit 1b5f107ac6
5 changed files with 54 additions and 12 deletions

@@ -513,9 +513,21 @@ struct ContentView: View {
        switch phase {
        case .background:
            print("txn: 📙 DAMUS BACKGROUNDED")
            let bgTask = this_app.beginBackgroundTask(withName: "Closing things down gracefully", expirationHandler: { [weak damus_state] in
                Log.error("App background signal handling: RUNNING OUT OF TIME! JUST CLOSE NDB DIRECTLY!", for: .app_lifecycle)
                // Background time about to expire, so close ndb directly.
                // This may still cause a memory error crash if subscription tasks have not been properly closed yet, but that is less likely than a 0xdead10cc crash if we don't do anything here.
                damus_state?.ndb.close()
            })
            damusClosingTask = Task { @MainActor in
                Log.debug("App background signal handling: App being backgrounded", for: .app_lifecycle)
                let startTime = CFAbsoluteTimeGetCurrent()
                await damus_state.nostrNetwork.close() // Close ndb streaming tasks before closing ndb to avoid memory errors
                Log.debug("App background signal handling: Nostr network closed after %.2f seconds", for: .app_lifecycle, CFAbsoluteTimeGetCurrent() - startTime)
                damus_state.ndb.close()
                Log.debug("App background signal handling: Ndb closed after %.2f seconds", for: .app_lifecycle, CFAbsoluteTimeGetCurrent() - startTime)
                this_app.endBackgroundTask(bgTask)
            }
            break
        case .inactive:

@@ -61,9 +61,18 @@ class NostrNetworkManager {
    }
    func close() async {
        await withTaskGroup { group in
            // Spawn each cancellation task in parallel for faster execution speed
            group.addTask {
                await self.reader.cancelAllTasks()
            }
            group.addTask {
                await self.profilesManager.stop()
            }
            pool.close()
            // But await on each one to prevent race conditions
            for await value in group { continue }
        }
    }
    func ping() {

@@ -42,6 +42,7 @@ extension NostrNetworkManager {
                try await Task.sleep(for: .seconds(1))
                try Task.checkCancellation()
                if subscriptionNeedsUpdate {
                    try Task.checkCancellation()
                    self.restartProfileListenerTask()
                    subscriptionNeedsUpdate = false
                }
@@ -50,11 +51,20 @@ extension NostrNetworkManager {
        }
        func stop() async {
            self.subscriptionSwitcherTask?.cancel()
            self.profileListenerTask?.cancel()
            await withTaskGroup { group in
                // Spawn each cancellation in parallel for better execution speed
                group.addTask {
                    await self.subscriptionSwitcherTask?.cancel()
                    try? await self.subscriptionSwitcherTask?.value
                }
                group.addTask {
                    await self.profileListenerTask?.cancel()
                    try? await self.profileListenerTask?.value
                }
                // But await for all of them to be done before returning to avoid race conditions
                for await value in group { continue }
            }
        }
        private func restartProfileListenerTask() {
            self.profileListenerTask?.cancel()
@@ -70,6 +80,7 @@ extension NostrNetworkManager {
            let pubkeys = Array(streams.keys)
            guard pubkeys.count > 0 else { return }
            let profileFilter = NostrFilter(kinds: [.metadata], authors: pubkeys)
            try Task.checkCancellation()
            for await ndbLender in self.subscriptionManager.streamIndefinitely(filters: [profileFilter], streamMode: .ndbFirst) {
                try Task.checkCancellation()
                try? ndbLender.borrow { ev in

@@ -387,15 +387,24 @@ extension NostrNetworkManager {
        }
        func cancelAllTasks() async {
            await withTaskGroup { group in
                Log.info("Cancelling all SubscriptionManager tasks", for: .subscription_manager)
                // Start each task cancellation in parallel for faster execution
                for (taskId, _) in self.tasks {
                    Log.info("Cancelling SubscriptionManager task %s", for: .subscription_manager, taskId.uuidString)
                    await cancelAndCleanUp(taskId: taskId)
                    group.addTask {
                        await self.cancelAndCleanUp(taskId: taskId)
                    }
                }
                // However, wait until all cancellations are complete to avoid race conditions.
                for await value in group {
                    continue
                }
                Log.info("Cancelled all SubscriptionManager tasks", for: .subscription_manager)
            }
        }
    }
}
enum StreamItem {
    /// An event which can be borrowed from NostrDB

@@ -14,6 +14,7 @@ enum LogCategory: String {
    case render
    case storage
    case networking
    case app_lifecycle
    case subscription_manager
    case timeline
    /// Logs related to Nostr Wallet Connect components