Add more safeguards to prevent RUNNINGBOARD 0xdead10cc crashes
This commit adds more safeguards to prevent RUNNINGBOARD 0xdead10cc crashes by:

1. Using `beginBackgroundTask(withName:expirationHandler:)` to request additional background execution time before the system suspends the app. See https://developer.apple.com/documentation/xcode/sigkill
2. Reorganizing app closing/cleanup tasks to run in parallel where possible, decreasing the time needed to clean up resources.

Signed-off-by: Daniel D’Aquino <daniel@daquino.me>
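For reference, the background-task pattern from item 1 looks roughly like the following minimal sketch — assuming `UIApplication.shared` stands in for the app's `this_app` handle, with `closeResources()` as a hypothetical placeholder for the real cleanup work:

    import UIKit

    // Hypothetical stand-in for the app's real cleanup work.
    func closeResources() async { /* close network connections, database, etc. */ }

    @MainActor
    func handleBackgrounding() {
        var bgTask: UIBackgroundTaskIdentifier = .invalid
        bgTask = UIApplication.shared.beginBackgroundTask(withName: "Cleanup") {
            // Expiration handler: background time is about to run out, so do only
            // the bare-minimum synchronous cleanup, then hand the time back.
            UIApplication.shared.endBackgroundTask(bgTask)
            bgTask = .invalid
        }
        Task { @MainActor in
            await closeResources()                         // graceful, possibly slow path
            UIApplication.shared.endBackgroundTask(bgTask) // always pair begin with end
            bgTask = .invalid
        }
    }

The key invariant is that every `beginBackgroundTask` call is paired with an `endBackgroundTask` call on both the graceful path and the expiration path; otherwise the app keeps consuming its background-time budget.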
@@ -513,9 +513,21 @@ struct ContentView: View {
         switch phase {
         case .background:
             print("txn: 📙 DAMUS BACKGROUNDED")
+            let bgTask = this_app.beginBackgroundTask(withName: "Closing things down gracefully", expirationHandler: { [weak damus_state] in
+                Log.error("App background signal handling: RUNNING OUT OF TIME! JUST CLOSE NDB DIRECTLY!", for: .app_lifecycle)
+                // Background time about to expire, so close ndb directly.
+                // This may still cause a memory error crash if subscription tasks have not been properly closed yet, but that is less likely than a 0xdead10cc crash if we don't do anything here.
+                damus_state?.ndb.close()
+            })
+
             damusClosingTask = Task { @MainActor in
+                Log.debug("App background signal handling: App being backgrounded", for: .app_lifecycle)
+                let startTime = CFAbsoluteTimeGetCurrent()
                 await damus_state.nostrNetwork.close() // Close ndb streaming tasks before closing ndb to avoid memory errors
+                Log.debug("App background signal handling: Nostr network closed after %.2f seconds", for: .app_lifecycle, CFAbsoluteTimeGetCurrent() - startTime)
                 damus_state.ndb.close()
+                Log.debug("App background signal handling: Ndb closed after %.2f seconds", for: .app_lifecycle, CFAbsoluteTimeGetCurrent() - startTime)
+                this_app.endBackgroundTask(bgTask)
             }
             break
         case .inactive:
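Note that the expiration handler above can fire while `damusClosingTask` is still mid-flight, so `ndb.close()` may be reached from both paths. A hedged sketch of one way to make a close operation idempotent, so a second caller becomes a no-op — `Resource` here is hypothetical, not damus's actual Ndb API:

    import Foundation

    // Hypothetical resource whose close() can be reached from both the graceful
    // closing task and the background-task expiration handler.
    final class Resource {
        private let lock = NSLock()
        private var isClosed = false

        func close() {
            lock.lock()
            defer { lock.unlock() }
            guard !isClosed else { return } // a second close() call is a no-op
            isClosed = true
            // ... flush buffers, release file handles, etc.
        }
    }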
@@ -61,9 +61,18 @@ class NostrNetworkManager {
     }
 
     func close() async {
-        await self.reader.cancelAllTasks()
-        await self.profilesManager.stop()
-        pool.close()
+        await withTaskGroup { group in
+            // Spawn each cancellation task in parallel for faster execution speed
+            group.addTask {
+                await self.reader.cancelAllTasks()
+            }
+            group.addTask {
+                await self.profilesManager.stop()
+            }
+            pool.close()
+            // But await on each one to prevent race conditions
+            for await value in group { continue }
+        }
     }
 
     func ping() {
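The hunk above is an instance of a general fan-out/join shape: spawn each independent teardown step as a child task, then drain the group so `close()` cannot return before every step finishes. A minimal sketch, with `closeA`/`closeB` as hypothetical stand-ins for the real work:

    // Fan out independent async teardown steps, then join on all of them
    // before returning.
    func closeAll(closeA: @escaping @Sendable () async -> Void,
                  closeB: @escaping @Sendable () async -> Void) async {
        await withTaskGroup(of: Void.self) { group in
            group.addTask { await closeA() } // runs concurrently with closeB
            group.addTask { await closeB() }
            await group.waitForAll()         // don't return until every step is done
        }
    }

Draining the group with `for await value in group { continue }`, as the diff does, has the same effect as `waitForAll()` for a `Void` task group.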
@@ -42,6 +42,7 @@ extension NostrNetworkManager {
                     try await Task.sleep(for: .seconds(1))
                     try Task.checkCancellation()
                     if subscriptionNeedsUpdate {
+                        try Task.checkCancellation()
                         self.restartProfileListenerTask()
                         subscriptionNeedsUpdate = false
                     }
@@ -50,10 +51,19 @@ extension NostrNetworkManager {
         }
 
         func stop() async {
-            self.subscriptionSwitcherTask?.cancel()
-            self.profileListenerTask?.cancel()
-            try? await self.subscriptionSwitcherTask?.value
-            try? await self.profileListenerTask?.value
+            await withTaskGroup { group in
+                // Spawn each cancellation in parallel for better execution speed
+                group.addTask {
+                    await self.subscriptionSwitcherTask?.cancel()
+                    try? await self.subscriptionSwitcherTask?.value
+                }
+                group.addTask {
+                    await self.profileListenerTask?.cancel()
+                    try? await self.profileListenerTask?.value
+                }
+                // But await for all of them to be done before returning to avoid race conditions
+                for await value in group { continue }
+            }
         }
 
         private func restartProfileListenerTask() {
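Worth spelling out why `stop()` awaits each task's `value` after calling `cancel()`: cancellation in Swift concurrency is cooperative, and `cancel()` only sets a flag. A minimal sketch of the cancel-then-join pattern, with `Worker` and its loop body as hypothetical examples:

    // Cancel-then-join teardown for a stored long-running task: cancel() only
    // *requests* cancellation, so stop() also awaits the task's value to be
    // sure it has actually exited.
    final class Worker {
        private var task: Task<Void, Error>?

        func start() {
            task = Task {
                while true {
                    try Task.checkCancellation()           // cooperative cancellation point
                    try await Task.sleep(for: .seconds(1)) // also throws when cancelled
                }
            }
        }

        func stop() async {
            task?.cancel()            // flags the task as cancelled
            try? await task?.value    // joins: returns only after the task winds down
            task = nil
        }
    }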
@@ -70,6 +80,7 @@ extension NostrNetworkManager {
                 let pubkeys = Array(streams.keys)
                 guard pubkeys.count > 0 else { return }
                 let profileFilter = NostrFilter(kinds: [.metadata], authors: pubkeys)
+                try Task.checkCancellation()
                 for await ndbLender in self.subscriptionManager.streamIndefinitely(filters: [profileFilter], streamMode: .ndbFirst) {
                     try Task.checkCancellation()
                     try? ndbLender.borrow { ev in
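The extra `Task.checkCancellation()` before `streamIndefinitely(...)` means an already-cancelled task bails out before subscribing at all, rather than opening a stream that would immediately be torn down. A minimal sketch of the same idea against a hypothetical `AsyncStream`:

    // Checking for cancellation *before* subscribing avoids setting up a
    // stream (and holding its resources) for a task that is already being
    // torn down; the real code streams nostr events instead of strings.
    func consume(events: AsyncStream<String>) async throws {
        try Task.checkCancellation()      // bail out before doing any setup
        for await event in events {
            try Task.checkCancellation()  // and re-check on every iteration
            print("received:", event)
        }
    }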
@@ -387,12 +387,21 @@ extension NostrNetworkManager {
         }
 
         func cancelAllTasks() async {
-            Log.info("Cancelling all SubscriptionManager tasks", for: .subscription_manager)
-            for (taskId, _) in self.tasks {
-                Log.info("Cancelling SubscriptionManager task %s", for: .subscription_manager, taskId.uuidString)
-                await cancelAndCleanUp(taskId: taskId)
+            await withTaskGroup { group in
+                Log.info("Cancelling all SubscriptionManager tasks", for: .subscription_manager)
+                // Start each task cancellation in parallel for faster execution
+                for (taskId, _) in self.tasks {
+                    Log.info("Cancelling SubscriptionManager task %s", for: .subscription_manager, taskId.uuidString)
+                    group.addTask {
+                        await self.cancelAndCleanUp(taskId: taskId)
+                    }
+                }
+                // However, wait until all cancellations are complete to avoid race conditions.
+                for await value in group {
+                    continue
+                }
+                Log.info("Cancelled all SubscriptionManager tasks", for: .subscription_manager)
             }
-            Log.info("Cancelled all SubscriptionManager tasks", for: .subscription_manager)
         }
     }
 }
@@ -14,6 +14,7 @@ enum LogCategory: String {
     case render
     case storage
     case networking
+    case app_lifecycle
     case subscription_manager
     case timeline
     /// Logs related to Nostr Wallet Connect components