Add EntityPreloader for batched profile metadata preloading
Implements an actor-based preloading system to efficiently fetch profile metadata for note authors and referenced users. The EntityPreloader queues requests and batches them intelligently (500 pubkeys or 1 second timeout) to avoid network overload while improving UX by ensuring profiles are available when rendering notes. Key changes: - Add EntityPreloader actor with queue-based batching logic - Integrate with SubscriptionManager via PreloadStrategy enum - Add lifecycle management (start/stop on app foreground/background) - Skip preload for pubkeys already cached in ndb - Include comprehensive test suite with 11 test cases covering batching, deduplication, and edge cases - Optimize ProfilePicView to load from ndb before first render Closes: https://github.com/damus-io/damus/issues/gh-3511 Changelog-Added: Profile metadata preloading for improved timeline performance Signed-off-by: Daniel D’Aquino <daniel@daquino.me>
This commit is contained in:
@@ -1700,6 +1700,10 @@
|
|||||||
D77135D62E7B78D700E7639F /* DataExtensions.swift in Sources */ = {isa = PBXBuildFile; fileRef = D77135D22E7B766300E7639F /* DataExtensions.swift */; };
|
D77135D62E7B78D700E7639F /* DataExtensions.swift in Sources */ = {isa = PBXBuildFile; fileRef = D77135D22E7B766300E7639F /* DataExtensions.swift */; };
|
||||||
D773BC5F2C6D538500349F0A /* CommentItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D773BC5E2C6D538500349F0A /* CommentItem.swift */; };
|
D773BC5F2C6D538500349F0A /* CommentItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D773BC5E2C6D538500349F0A /* CommentItem.swift */; };
|
||||||
D773BC602C6D538500349F0A /* CommentItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D773BC5E2C6D538500349F0A /* CommentItem.swift */; };
|
D773BC602C6D538500349F0A /* CommentItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D773BC5E2C6D538500349F0A /* CommentItem.swift */; };
|
||||||
|
D776BE402F232B17002DA1C9 /* EntityPreloader.swift in Sources */ = {isa = PBXBuildFile; fileRef = D776BE3F2F232B17002DA1C9 /* EntityPreloader.swift */; };
|
||||||
|
D776BE412F232B17002DA1C9 /* EntityPreloader.swift in Sources */ = {isa = PBXBuildFile; fileRef = D776BE3F2F232B17002DA1C9 /* EntityPreloader.swift */; };
|
||||||
|
D776BE422F232B17002DA1C9 /* EntityPreloader.swift in Sources */ = {isa = PBXBuildFile; fileRef = D776BE3F2F232B17002DA1C9 /* EntityPreloader.swift */; };
|
||||||
|
D776BE442F23301A002DA1C9 /* EntityPreloaderTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D776BE432F233012002DA1C9 /* EntityPreloaderTests.swift */; };
|
||||||
D77BFA0B2AE3051200621634 /* ProfileActionSheetView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D77BFA0A2AE3051200621634 /* ProfileActionSheetView.swift */; };
|
D77BFA0B2AE3051200621634 /* ProfileActionSheetView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D77BFA0A2AE3051200621634 /* ProfileActionSheetView.swift */; };
|
||||||
D77DA2C42F19CA48000B7093 /* AsyncStreamUtilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = D77DA2C32F19CA40000B7093 /* AsyncStreamUtilities.swift */; };
|
D77DA2C42F19CA48000B7093 /* AsyncStreamUtilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = D77DA2C32F19CA40000B7093 /* AsyncStreamUtilities.swift */; };
|
||||||
D77DA2C52F19CA48000B7093 /* AsyncStreamUtilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = D77DA2C32F19CA40000B7093 /* AsyncStreamUtilities.swift */; };
|
D77DA2C52F19CA48000B7093 /* AsyncStreamUtilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = D77DA2C32F19CA40000B7093 /* AsyncStreamUtilities.swift */; };
|
||||||
@@ -2820,6 +2824,8 @@
|
|||||||
D76BE18B2E0CF3D5004AD0C6 /* Interests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Interests.swift; sourceTree = "<group>"; };
|
D76BE18B2E0CF3D5004AD0C6 /* Interests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Interests.swift; sourceTree = "<group>"; };
|
||||||
D77135D22E7B766300E7639F /* DataExtensions.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DataExtensions.swift; sourceTree = "<group>"; };
|
D77135D22E7B766300E7639F /* DataExtensions.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DataExtensions.swift; sourceTree = "<group>"; };
|
||||||
D773BC5E2C6D538500349F0A /* CommentItem.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CommentItem.swift; sourceTree = "<group>"; };
|
D773BC5E2C6D538500349F0A /* CommentItem.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CommentItem.swift; sourceTree = "<group>"; };
|
||||||
|
D776BE3F2F232B17002DA1C9 /* EntityPreloader.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = EntityPreloader.swift; sourceTree = "<group>"; };
|
||||||
|
D776BE432F233012002DA1C9 /* EntityPreloaderTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = EntityPreloaderTests.swift; sourceTree = "<group>"; };
|
||||||
D77BFA0A2AE3051200621634 /* ProfileActionSheetView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ProfileActionSheetView.swift; sourceTree = "<group>"; };
|
D77BFA0A2AE3051200621634 /* ProfileActionSheetView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ProfileActionSheetView.swift; sourceTree = "<group>"; };
|
||||||
D77DA2C32F19CA40000B7093 /* AsyncStreamUtilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AsyncStreamUtilities.swift; sourceTree = "<group>"; };
|
D77DA2C32F19CA40000B7093 /* AsyncStreamUtilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AsyncStreamUtilities.swift; sourceTree = "<group>"; };
|
||||||
D77DA2C72F19D452000B7093 /* NegentropyUtilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NegentropyUtilities.swift; sourceTree = "<group>"; };
|
D77DA2C72F19D452000B7093 /* NegentropyUtilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NegentropyUtilities.swift; sourceTree = "<group>"; };
|
||||||
@@ -3867,6 +3873,7 @@
|
|||||||
4CE6DEF627F7A08200C66700 /* damusTests */ = {
|
4CE6DEF627F7A08200C66700 /* damusTests */ = {
|
||||||
isa = PBXGroup;
|
isa = PBXGroup;
|
||||||
children = (
|
children = (
|
||||||
|
D776BE432F233012002DA1C9 /* EntityPreloaderTests.swift */,
|
||||||
D77DA2CD2F1C2596000B7093 /* SubscriptionManagerNegentropyTests.swift */,
|
D77DA2CD2F1C2596000B7093 /* SubscriptionManagerNegentropyTests.swift */,
|
||||||
D74723ED2F15B0D6002DA12A /* NegentropySupportTests.swift */,
|
D74723ED2F15B0D6002DA12A /* NegentropySupportTests.swift */,
|
||||||
D72734292F089EE600F90677 /* NdbMigrationTests.swift */,
|
D72734292F089EE600F90677 /* NdbMigrationTests.swift */,
|
||||||
@@ -5238,6 +5245,7 @@
|
|||||||
D73BDB122D71212600D69970 /* NostrNetworkManager */ = {
|
D73BDB122D71212600D69970 /* NostrNetworkManager */ = {
|
||||||
isa = PBXGroup;
|
isa = PBXGroup;
|
||||||
children = (
|
children = (
|
||||||
|
D776BE3F2F232B17002DA1C9 /* EntityPreloader.swift */,
|
||||||
D72B6FA12E7DFB3F0050CD1D /* ProfilesManager.swift */,
|
D72B6FA12E7DFB3F0050CD1D /* ProfilesManager.swift */,
|
||||||
D733F9E02D92C1AA00317B11 /* SubscriptionManager.swift */,
|
D733F9E02D92C1AA00317B11 /* SubscriptionManager.swift */,
|
||||||
D73BDB172D71310C00D69970 /* UserRelayListErrors.swift */,
|
D73BDB172D71310C00D69970 /* UserRelayListErrors.swift */,
|
||||||
@@ -5851,6 +5859,7 @@
|
|||||||
4C285C8A2838B985008A31F1 /* ProfilePictureSelector.swift in Sources */,
|
4C285C8A2838B985008A31F1 /* ProfilePictureSelector.swift in Sources */,
|
||||||
4CFD502F2A2DA45800A229DB /* MediaView.swift in Sources */,
|
4CFD502F2A2DA45800A229DB /* MediaView.swift in Sources */,
|
||||||
D7373BA62B688EA300F7783D /* DamusPurpleTranslationSetupView.swift in Sources */,
|
D7373BA62B688EA300F7783D /* DamusPurpleTranslationSetupView.swift in Sources */,
|
||||||
|
D776BE402F232B17002DA1C9 /* EntityPreloader.swift in Sources */,
|
||||||
4C9F18E429ABDE6D008C55EC /* MaybeAnonPfpView.swift in Sources */,
|
4C9F18E429ABDE6D008C55EC /* MaybeAnonPfpView.swift in Sources */,
|
||||||
4CA5588329F33F5B00DC6A45 /* StringCodable.swift in Sources */,
|
4CA5588329F33F5B00DC6A45 /* StringCodable.swift in Sources */,
|
||||||
4C75EFB92804A2740006080F /* EventView.swift in Sources */,
|
4C75EFB92804A2740006080F /* EventView.swift in Sources */,
|
||||||
@@ -6376,6 +6385,7 @@
|
|||||||
E06336AA2B75832100A88E6B /* ImageMetadataTest.swift in Sources */,
|
E06336AA2B75832100A88E6B /* ImageMetadataTest.swift in Sources */,
|
||||||
4C363AA02828A8DD006E126D /* LikeTests.swift in Sources */,
|
4C363AA02828A8DD006E126D /* LikeTests.swift in Sources */,
|
||||||
D7A0D8752D1FE67900DCBE59 /* EditPictureControlTests.swift in Sources */,
|
D7A0D8752D1FE67900DCBE59 /* EditPictureControlTests.swift in Sources */,
|
||||||
|
D776BE442F23301A002DA1C9 /* EntityPreloaderTests.swift in Sources */,
|
||||||
4C90BD1C283AC38E008EE7EF /* Bech32Tests.swift in Sources */,
|
4C90BD1C283AC38E008EE7EF /* Bech32Tests.swift in Sources */,
|
||||||
50A50A8D29A09E1C00C01BE7 /* RequestTests.swift in Sources */,
|
50A50A8D29A09E1C00C01BE7 /* RequestTests.swift in Sources */,
|
||||||
D7100CB62EEA3E20008D94B7 /* AutoSaveViewModelTests.swift in Sources */,
|
D7100CB62EEA3E20008D94B7 /* AutoSaveViewModelTests.swift in Sources */,
|
||||||
@@ -6474,6 +6484,7 @@
|
|||||||
D73B74E22D8365BA0067BDBC /* ExtraFonts.swift in Sources */,
|
D73B74E22D8365BA0067BDBC /* ExtraFonts.swift in Sources */,
|
||||||
82D6FAE32CD99F7900C925F4 /* FollowedNotify.swift in Sources */,
|
82D6FAE32CD99F7900C925F4 /* FollowedNotify.swift in Sources */,
|
||||||
82D6FAE42CD99F7900C925F4 /* FollowNotify.swift in Sources */,
|
82D6FAE42CD99F7900C925F4 /* FollowNotify.swift in Sources */,
|
||||||
|
D776BE412F232B17002DA1C9 /* EntityPreloader.swift in Sources */,
|
||||||
82D6FAE52CD99F7900C925F4 /* LikedNotify.swift in Sources */,
|
82D6FAE52CD99F7900C925F4 /* LikedNotify.swift in Sources */,
|
||||||
82D6FAE62CD99F7900C925F4 /* LocalNotificationNotify.swift in Sources */,
|
82D6FAE62CD99F7900C925F4 /* LocalNotificationNotify.swift in Sources */,
|
||||||
5C4FA8012DC5119300CE658C /* FollowPackPreview.swift in Sources */,
|
5C4FA8012DC5119300CE658C /* FollowPackPreview.swift in Sources */,
|
||||||
@@ -6964,6 +6975,7 @@
|
|||||||
4C36247A2D5EA20C00DD066E /* bech32.c in Sources */,
|
4C36247A2D5EA20C00DD066E /* bech32.c in Sources */,
|
||||||
4C3624792D5EA20200DD066E /* bolt11.c in Sources */,
|
4C3624792D5EA20200DD066E /* bolt11.c in Sources */,
|
||||||
4C3624782D5EA1FE00DD066E /* error.c in Sources */,
|
4C3624782D5EA1FE00DD066E /* error.c in Sources */,
|
||||||
|
D776BE422F232B17002DA1C9 /* EntityPreloader.swift in Sources */,
|
||||||
4C3624772D5EA1FA00DD066E /* nostr_bech32.c in Sources */,
|
4C3624772D5EA1FA00DD066E /* nostr_bech32.c in Sources */,
|
||||||
4C3624762D5EA1F600DD066E /* content_parser.c in Sources */,
|
4C3624762D5EA1F600DD066E /* content_parser.c in Sources */,
|
||||||
4C3624752D5EA1E000DD066E /* block.c in Sources */,
|
4C3624752D5EA1E000DD066E /* block.c in Sources */,
|
||||||
|
|||||||
196
damus/Core/Networking/NostrNetworkManager/EntityPreloader.swift
Normal file
196
damus/Core/Networking/NostrNetworkManager/EntityPreloader.swift
Normal file
@@ -0,0 +1,196 @@
|
|||||||
|
//
|
||||||
|
// EntityPreloader.swift
|
||||||
|
// damus
|
||||||
|
//
|
||||||
|
// Created by Daniel D'Aquino on 2026-01-22.
|
||||||
|
//
|
||||||
|
|
||||||
|
import Foundation
|
||||||
|
import os
|
||||||
|
import Negentropy
|
||||||
|
|
||||||
|
extension NostrNetworkManager {
|
||||||
|
/// Preloads entities referenced in notes to improve user experience.
|
||||||
|
///
|
||||||
|
/// This actor efficiently batches entity preload requests to avoid overloading the network.
|
||||||
|
/// Currently limited to preloading profile metadata, but designed to be expanded to other
|
||||||
|
/// entity types (e.g., referenced events, media) in the future.
|
||||||
|
///
|
||||||
|
/// ## Implementation notes
|
||||||
|
///
|
||||||
|
/// - Uses a queue to collect preload requests
|
||||||
|
/// - Batches requests intelligently: either when 500 pending requests accumulate, or after 1 second
|
||||||
|
/// - Uses standard Nostr subscriptions to fetch metadata
|
||||||
|
/// - Runs a long-running task to process the queue continuously
|
||||||
|
actor EntityPreloader {
|
||||||
|
private let pool: RelayPool
|
||||||
|
private let ndb: Ndb
|
||||||
|
private let queue: QueueableNotify<Set<Pubkey>>
|
||||||
|
private var processingTask: Task<Void, Never>?
|
||||||
|
private var accumulatedPubkeys = Set<Pubkey>()
|
||||||
|
|
||||||
|
private static let logger = Logger(
|
||||||
|
subsystem: Constants.MAIN_APP_BUNDLE_IDENTIFIER,
|
||||||
|
category: "entity_preloader"
|
||||||
|
)
|
||||||
|
|
||||||
|
/// Maximum number of items allowed in the queue before old items are discarded
|
||||||
|
private static let maxQueueItems = 1000
|
||||||
|
/// Batch size threshold - preload immediately when this many requests are pending
|
||||||
|
private static let batchSizeThreshold = 500
|
||||||
|
/// Time threshold - preload after this duration even if batch size not reached
|
||||||
|
private static let timeThreshold: Duration = .seconds(1)
|
||||||
|
|
||||||
|
init(pool: RelayPool, ndb: Ndb) {
|
||||||
|
self.pool = pool
|
||||||
|
self.ndb = ndb
|
||||||
|
self.queue = QueueableNotify<Set<Pubkey>>(maxQueueItems: Self.maxQueueItems)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Starts the preloader's background processing task
|
||||||
|
func start() {
|
||||||
|
guard processingTask == nil else {
|
||||||
|
Self.logger.warning("EntityPreloader already started")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
Self.logger.info("Starting EntityPreloader")
|
||||||
|
processingTask = Task {
|
||||||
|
await monitorQueue()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Stops the preloader's background processing task
|
||||||
|
func stop() {
|
||||||
|
Self.logger.info("Stopping EntityPreloader")
|
||||||
|
processingTask?.cancel()
|
||||||
|
processingTask = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Preloads metadata for the author and referenced profiles in a note
|
||||||
|
///
|
||||||
|
/// - Parameter noteLender: The note to extract profiles from
|
||||||
|
nonisolated func preload(note noteLender: NdbNoteLender) {
|
||||||
|
Task {
|
||||||
|
do {
|
||||||
|
let pubkeys = try noteLender.borrow { event in
|
||||||
|
if event.known_kind == .metadata { return Set<Pubkey>() } // Don't preload pubkeys from a user profile
|
||||||
|
var pubkeys = Set<Pubkey>()
|
||||||
|
|
||||||
|
// Add the author
|
||||||
|
pubkeys.insert(event.pubkey)
|
||||||
|
|
||||||
|
// Add all referenced pubkeys from p tags
|
||||||
|
for referencedPubkey in event.referenced_pubkeys {
|
||||||
|
pubkeys.insert(referencedPubkey)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pubkeys
|
||||||
|
}
|
||||||
|
|
||||||
|
guard !pubkeys.isEmpty else { return }
|
||||||
|
|
||||||
|
// Filter out pubkeys that already have profiles in ndb
|
||||||
|
let pubkeysToPreload = await pubkeys.asyncFilter { pubkey in
|
||||||
|
let hasProfile = (try? await ndb.lookup_profile(pubkey, borrow: { pr in
|
||||||
|
pr != nil
|
||||||
|
})) ?? false
|
||||||
|
return !hasProfile
|
||||||
|
}
|
||||||
|
|
||||||
|
guard !pubkeysToPreload.isEmpty else {
|
||||||
|
Self.logger.debug("All \(pubkeys.count, privacy: .public) profiles already in ndb, skipping preload")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
Self.logger.debug("Queueing preload for \(pubkeysToPreload.count, privacy: .public) profiles (\(pubkeys.count - pubkeysToPreload.count, privacy: .public) already cached)")
|
||||||
|
await queue.add(item: pubkeysToPreload)
|
||||||
|
} catch {
|
||||||
|
Self.logger.error("Error extracting pubkeys from note: \(error.localizedDescription, privacy: .public)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Processes the queue continuously, batching requests intelligently
|
||||||
|
private func monitorQueue() async {
|
||||||
|
await withThrowingTaskGroup { group in
|
||||||
|
group.addTask {
|
||||||
|
for await newPubkeys in await self.queue.stream {
|
||||||
|
try Task.checkCancellation()
|
||||||
|
await self.handle(newQueueItem: newPubkeys)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
group.addTask {
|
||||||
|
while !Task.isCancelled {
|
||||||
|
try await Task.sleep(for: Self.timeThreshold)
|
||||||
|
await self.handleTimerTick()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private func handleTimerTick() async {
|
||||||
|
if accumulatedPubkeys.count > 0 {
|
||||||
|
await self.performPreload()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private func handle(newQueueItem: Set<Pubkey>) async {
|
||||||
|
accumulatedPubkeys = self.accumulatedPubkeys.union(newQueueItem)
|
||||||
|
if accumulatedPubkeys.count > Self.batchSizeThreshold {
|
||||||
|
await self.performPreload()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private func performPreload() async {
|
||||||
|
let pubkeysToPreload = accumulatedPubkeys
|
||||||
|
accumulatedPubkeys.removeAll()
|
||||||
|
Self.logger.debug("Preloading \(pubkeysToPreload.count, privacy: .public) profiles")
|
||||||
|
await self.performPreload(pubkeys: pubkeysToPreload)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Performs the actual preload operation using standard Nostr subscriptions.
|
||||||
|
///
|
||||||
|
/// - Parameter pubkeys: The set of pubkeys to preload metadata for
|
||||||
|
private func performPreload(pubkeys: Set<Pubkey>) async {
|
||||||
|
guard !pubkeys.isEmpty else { return }
|
||||||
|
|
||||||
|
print("EntityPreloader.performPreload: Starting preload for \(pubkeys.count) pubkeys")
|
||||||
|
|
||||||
|
let filter = NostrFilter(
|
||||||
|
kinds: [.metadata],
|
||||||
|
authors: Array(pubkeys)
|
||||||
|
)
|
||||||
|
|
||||||
|
for try await _ in await pool.subscribeExistingItems(
|
||||||
|
filters: [filter],
|
||||||
|
to: nil,
|
||||||
|
eoseTimeout: .seconds(10),
|
||||||
|
) {
|
||||||
|
// NO-OP: We are only subscribing to let nostrdb ingest those events, but we do not need special handling here.
|
||||||
|
guard !Task.isCancelled else { break }
|
||||||
|
}
|
||||||
|
|
||||||
|
Self.logger.debug("Completed metadata fetch for \(pubkeys.count, privacy: .public) profiles")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MARK: - Private Extensions
|
||||||
|
|
||||||
|
private extension Set {
|
||||||
|
/// Asynchronously filters the set based on an async predicate
|
||||||
|
///
|
||||||
|
/// - Parameter predicate: An async closure that returns true for elements to include
|
||||||
|
/// - Returns: A new set containing only elements for which predicate returns true
|
||||||
|
func asyncFilter(_ predicate: (Element) async -> Bool) async -> Set<Element> {
|
||||||
|
var result = Set<Element>()
|
||||||
|
for element in self {
|
||||||
|
if await predicate(element) {
|
||||||
|
result.insert(element)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -53,6 +53,7 @@ class NostrNetworkManager {
|
|||||||
func connect() async {
|
func connect() async {
|
||||||
await self.userRelayList.connect() // Will load the user's list, apply it, and get RelayPool to connect to it.
|
await self.userRelayList.connect() // Will load the user's list, apply it, and get RelayPool to connect to it.
|
||||||
await self.profilesManager.load()
|
await self.profilesManager.load()
|
||||||
|
await self.reader.startPreloader()
|
||||||
}
|
}
|
||||||
|
|
||||||
func disconnectRelays() async {
|
func disconnectRelays() async {
|
||||||
@@ -61,12 +62,14 @@ class NostrNetworkManager {
|
|||||||
|
|
||||||
func handleAppBackgroundRequest() async {
|
func handleAppBackgroundRequest() async {
|
||||||
await self.reader.cancelAllTasks()
|
await self.reader.cancelAllTasks()
|
||||||
|
await self.reader.stopPreloader()
|
||||||
await self.pool.cleanQueuedRequestForSessionEnd()
|
await self.pool.cleanQueuedRequestForSessionEnd()
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleAppForegroundRequest() async {
|
func handleAppForegroundRequest() async {
|
||||||
// Pinging the network will automatically reconnect any dead websocket connections
|
// Pinging the network will automatically reconnect any dead websocket connections
|
||||||
await self.ping()
|
await self.ping()
|
||||||
|
await self.reader.startPreloader()
|
||||||
}
|
}
|
||||||
|
|
||||||
func close() async {
|
func close() async {
|
||||||
@@ -78,6 +81,9 @@ class NostrNetworkManager {
|
|||||||
group.addTask {
|
group.addTask {
|
||||||
await self.profilesManager.stop()
|
await self.profilesManager.stop()
|
||||||
}
|
}
|
||||||
|
group.addTask {
|
||||||
|
await self.reader.stopPreloader()
|
||||||
|
}
|
||||||
// But await on each one to prevent race conditions
|
// But await on each one to prevent race conditions
|
||||||
for await value in group { continue }
|
for await value in group { continue }
|
||||||
await pool.close()
|
await pool.close()
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ extension NostrNetworkManager {
|
|||||||
private var ndb: Ndb
|
private var ndb: Ndb
|
||||||
private var taskManager: TaskManager
|
private var taskManager: TaskManager
|
||||||
private let experimentalLocalRelayModelSupport: Bool
|
private let experimentalLocalRelayModelSupport: Bool
|
||||||
|
private let entityPreloader: EntityPreloader
|
||||||
|
|
||||||
private static let logger = Logger(
|
private static let logger = Logger(
|
||||||
subsystem: Constants.MAIN_APP_BUNDLE_IDENTIFIER,
|
subsystem: Constants.MAIN_APP_BUNDLE_IDENTIFIER,
|
||||||
@@ -31,16 +32,17 @@ extension NostrNetworkManager {
|
|||||||
self.ndb = ndb
|
self.ndb = ndb
|
||||||
self.taskManager = TaskManager()
|
self.taskManager = TaskManager()
|
||||||
self.experimentalLocalRelayModelSupport = experimentalLocalRelayModelSupport
|
self.experimentalLocalRelayModelSupport = experimentalLocalRelayModelSupport
|
||||||
|
self.entityPreloader = EntityPreloader(pool: pool, ndb: ndb)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MARK: - Subscribing and Streaming data from Nostr
|
// MARK: - Subscribing and Streaming data from Nostr
|
||||||
|
|
||||||
/// Streams notes until the EOSE signal
|
/// Streams notes until the EOSE signal
|
||||||
func streamExistingEvents(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, timeout: Duration? = nil, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
func streamExistingEvents(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, timeout: Duration? = nil, streamMode: StreamMode? = nil, preloadStrategy: PreloadStrategy? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
||||||
let timeout = timeout ?? .seconds(10)
|
let timeout = timeout ?? .seconds(10)
|
||||||
return AsyncStream<NdbNoteLender> { continuation in
|
return AsyncStream<NdbNoteLender> { continuation in
|
||||||
let streamingTask = Task {
|
let streamingTask = Task {
|
||||||
outerLoop: for await item in self.advancedStream(filters: filters, to: desiredRelays, timeout: timeout, streamMode: streamMode, id: id) {
|
outerLoop: for await item in self.advancedStream(filters: filters, to: desiredRelays, timeout: timeout, streamMode: streamMode, preloadStrategy: preloadStrategy, id: id) {
|
||||||
try Task.checkCancellation()
|
try Task.checkCancellation()
|
||||||
switch item {
|
switch item {
|
||||||
case .event(let lender):
|
case .event(let lender):
|
||||||
@@ -64,10 +66,10 @@ extension NostrNetworkManager {
|
|||||||
/// Subscribes to data from user's relays, for a maximum period of time — after which the stream will end.
|
/// Subscribes to data from user's relays, for a maximum period of time — after which the stream will end.
|
||||||
///
|
///
|
||||||
/// This is useful when waiting for some specific data from Nostr, but not indefinitely.
|
/// This is useful when waiting for some specific data from Nostr, but not indefinitely.
|
||||||
func timedStream(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, timeout: Duration, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
func timedStream(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, timeout: Duration, streamMode: StreamMode? = nil, preloadStrategy: PreloadStrategy? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
||||||
return AsyncStream<NdbNoteLender> { continuation in
|
return AsyncStream<NdbNoteLender> { continuation in
|
||||||
let streamingTask = Task {
|
let streamingTask = Task {
|
||||||
for await item in self.advancedStream(filters: filters, to: desiredRelays, timeout: timeout, streamMode: streamMode, id: id) {
|
for await item in self.advancedStream(filters: filters, to: desiredRelays, timeout: timeout, streamMode: streamMode, preloadStrategy: preloadStrategy, id: id) {
|
||||||
try Task.checkCancellation()
|
try Task.checkCancellation()
|
||||||
switch item {
|
switch item {
|
||||||
case .event(lender: let lender):
|
case .event(lender: let lender):
|
||||||
@@ -88,10 +90,10 @@ extension NostrNetworkManager {
|
|||||||
/// Subscribes to notes indefinitely
|
/// Subscribes to notes indefinitely
|
||||||
///
|
///
|
||||||
/// This is useful when simply streaming all events indefinitely
|
/// This is useful when simply streaming all events indefinitely
|
||||||
func streamIndefinitely(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
func streamIndefinitely(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, streamMode: StreamMode? = nil, preloadStrategy: PreloadStrategy? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
||||||
return AsyncStream<NdbNoteLender> { continuation in
|
return AsyncStream<NdbNoteLender> { continuation in
|
||||||
let streamingTask = Task {
|
let streamingTask = Task {
|
||||||
for await item in self.advancedStream(filters: filters, to: desiredRelays, streamMode: streamMode, id: id) {
|
for await item in self.advancedStream(filters: filters, to: desiredRelays, streamMode: streamMode, preloadStrategy: preloadStrategy, id: id) {
|
||||||
try Task.checkCancellation()
|
try Task.checkCancellation()
|
||||||
switch item {
|
switch item {
|
||||||
case .event(lender: let lender):
|
case .event(lender: let lender):
|
||||||
@@ -111,9 +113,10 @@ extension NostrNetworkManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func advancedStream(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, timeout: Duration? = nil, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<StreamItem> {
|
func advancedStream(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, timeout: Duration? = nil, streamMode: StreamMode? = nil, preloadStrategy: PreloadStrategy? = nil, id: UUID? = nil) -> AsyncStream<StreamItem> {
|
||||||
let id = id ?? UUID()
|
let id = id ?? UUID()
|
||||||
let streamMode = streamMode ?? defaultStreamMode()
|
let streamMode = streamMode ?? defaultStreamMode()
|
||||||
|
let preloadStrategy = preloadStrategy ?? self.defaultPreloadingMode()
|
||||||
return AsyncStream<StreamItem> { continuation in
|
return AsyncStream<StreamItem> { continuation in
|
||||||
let timeoutTask = Task {
|
let timeoutTask = Task {
|
||||||
guard let timeout else { return }
|
guard let timeout else { return }
|
||||||
@@ -163,6 +166,10 @@ extension NostrNetworkManager {
|
|||||||
switch item {
|
switch item {
|
||||||
case .event(let lender):
|
case .event(let lender):
|
||||||
logStreamPipelineStats("SubscriptionManager_Advanced_Stream_\(id)", "Consumer_\(id)")
|
logStreamPipelineStats("SubscriptionManager_Advanced_Stream_\(id)", "Consumer_\(id)")
|
||||||
|
// Preload entities if requested
|
||||||
|
if case .preload = preloadStrategy {
|
||||||
|
self.entityPreloader.preload(note: lender)
|
||||||
|
}
|
||||||
continuation.yield(item)
|
continuation.yield(item)
|
||||||
case .eose:
|
case .eose:
|
||||||
break // Should not happen
|
break // Should not happen
|
||||||
@@ -201,6 +208,10 @@ extension NostrNetworkManager {
|
|||||||
}
|
}
|
||||||
negentropyStorageVector.unsealAndInsert(nostrEvent: event)
|
negentropyStorageVector.unsealAndInsert(nostrEvent: event)
|
||||||
})
|
})
|
||||||
|
// Preload entities if requested
|
||||||
|
if case .preload = preloadStrategy {
|
||||||
|
self.entityPreloader.preload(note: lender)
|
||||||
|
}
|
||||||
continuation.yield(item)
|
continuation.yield(item)
|
||||||
case .eose:
|
case .eose:
|
||||||
break // Should not happen
|
break // Should not happen
|
||||||
@@ -392,6 +403,10 @@ extension NostrNetworkManager {
|
|||||||
self.experimentalLocalRelayModelSupport ? .ndbFirst(networkOptimization: nil) : .ndbAndNetworkParallel(networkOptimization: nil)
|
self.experimentalLocalRelayModelSupport ? .ndbFirst(networkOptimization: nil) : .ndbAndNetworkParallel(networkOptimization: nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private func defaultPreloadingMode() -> PreloadStrategy {
|
||||||
|
return .preload
|
||||||
|
}
|
||||||
|
|
||||||
// MARK: - Finding specific data from Nostr
|
// MARK: - Finding specific data from Nostr
|
||||||
|
|
||||||
/// Finds a non-replaceable event based on a note ID
|
/// Finds a non-replaceable event based on a note ID
|
||||||
@@ -495,6 +510,14 @@ extension NostrNetworkManager {
|
|||||||
|
|
||||||
// MARK: - Task management
|
// MARK: - Task management
|
||||||
|
|
||||||
|
func startPreloader() async {
|
||||||
|
await self.entityPreloader.start()
|
||||||
|
}
|
||||||
|
|
||||||
|
func stopPreloader() async {
|
||||||
|
await self.entityPreloader.stop()
|
||||||
|
}
|
||||||
|
|
||||||
func cancelAllTasks() async {
|
func cancelAllTasks() async {
|
||||||
await self.taskManager.cancelAllTasks()
|
await self.taskManager.cancelAllTasks()
|
||||||
}
|
}
|
||||||
@@ -628,4 +651,12 @@ extension NostrNetworkManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Defines the preloading strategy for a stream
|
||||||
|
enum PreloadStrategy {
|
||||||
|
/// No preloading - notes are not sent to EntityPreloader
|
||||||
|
case noPreloading
|
||||||
|
/// Preload metadata for authors and referenced profiles
|
||||||
|
case preload
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -96,6 +96,17 @@ struct ProfilePicView: View {
|
|||||||
pubkey
|
pubkey
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static func loadFromNdb(pubkey: Pubkey, ndb: Ndb) throws -> String? {
|
||||||
|
try ndb.lookup_profile(pubkey, borrow: { profileRecord in
|
||||||
|
switch profileRecord {
|
||||||
|
case .none:
|
||||||
|
return nil
|
||||||
|
case .some(let profileRecord):
|
||||||
|
return profileRecord.profile?.picture
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func get_lnurl() -> String? {
|
func get_lnurl() -> String? {
|
||||||
return try? profiles.lookup_with_timestamp(pubkey, borrow: { pr in
|
return try? profiles.lookup_with_timestamp(pubkey, borrow: { pr in
|
||||||
@@ -124,6 +135,13 @@ struct ProfilePicView: View {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
.task {
|
.task {
|
||||||
|
// Load from ndb asynchronously to avoid blocking the main thread
|
||||||
|
if picture == nil {
|
||||||
|
if let loaded = try? Self.loadFromNdb(pubkey: pubkey, ndb: damusState.ndb) {
|
||||||
|
self.picture = loaded
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for await profile in await damusState.nostrNetwork.profilesManager.streamProfile(pubkey: pubkey) {
|
for await profile in await damusState.nostrNetwork.profilesManager.streamProfile(pubkey: pubkey) {
|
||||||
if let pic = profile.picture {
|
if let pic = profile.picture {
|
||||||
self.picture = pic
|
self.picture = pic
|
||||||
|
|||||||
@@ -62,7 +62,7 @@ class SearchHomeModel: ObservableObject {
|
|||||||
var follow_list_filter = NostrFilter(kinds: [.follow_list])
|
var follow_list_filter = NostrFilter(kinds: [.follow_list])
|
||||||
follow_list_filter.until = UInt32(Date.now.timeIntervalSince1970)
|
follow_list_filter.until = UInt32(Date.now.timeIntervalSince1970)
|
||||||
|
|
||||||
for await item in damus_state.nostrNetwork.reader.advancedStream(filters: [get_base_filter(), follow_list_filter], to: to_relays) {
|
for await item in damus_state.nostrNetwork.reader.advancedStream(filters: [get_base_filter(), follow_list_filter], to: to_relays, preloadStrategy: .preload) {
|
||||||
switch item {
|
switch item {
|
||||||
case .event(lender: let lender):
|
case .event(lender: let lender):
|
||||||
await lender.justUseACopy({ event in
|
await lender.justUseACopy({ event in
|
||||||
|
|||||||
888
damusTests/EntityPreloaderTests.swift
Normal file
888
damusTests/EntityPreloaderTests.swift
Normal file
@@ -0,0 +1,888 @@
|
|||||||
|
//
|
||||||
|
// EntityPreloaderTests.swift
|
||||||
|
// damus
|
||||||
|
//
|
||||||
|
// Created by Daniel D'Aquino on 2026-01-22.
|
||||||
|
//
|
||||||
|
|
||||||
|
import XCTest
|
||||||
|
import NostrSDK
|
||||||
|
import Negentropy
|
||||||
|
@testable import damus
|
||||||
|
|
||||||
|
/// Tests for the EntityPreloader functionality.
|
||||||
|
///
|
||||||
|
/// These tests verify that EntityPreloader correctly:
|
||||||
|
/// - Extracts author and referenced pubkeys from notes
|
||||||
|
/// - Batches preload requests efficiently
|
||||||
|
/// - Uses standard Nostr subscriptions to fetch metadata
|
||||||
|
/// - Integrates properly with SubscriptionManager
|
||||||
|
final class EntityPreloaderTests: XCTestCase {
|
||||||
|
|
||||||
|
// MARK: - Helper Functions
|
||||||
|
|
||||||
|
/// Creates and runs a local relay on the specified port.
|
||||||
|
/// - Parameter port: The port number to run the relay on
|
||||||
|
/// - Returns: The running LocalRelay instance
|
||||||
|
/// Boots a throwaway local relay for this test on the given port.
/// - Parameter port: The TCP port the relay listens on.
/// - Returns: The running `LocalRelay` instance.
private func setupRelay(port: UInt16) async throws -> LocalRelay {
    let configuration = RelayBuilder()
        .port(port: port)
        .rateLimit(limit: .init(maxReqs: 100, notesPerMinute: 100))
    let localRelay = LocalRelay(builder: configuration)
    try await localRelay.run()
    print("Relay url: \(await localRelay.url())")
    return localRelay
}
|
||||||
|
|
||||||
|
/// Connects to a relay and waits for the connection to be established.
|
||||||
|
/// - Parameters:
|
||||||
|
/// - url: The relay URL to connect to
|
||||||
|
/// - label: Optional label for logging (e.g., "Relay1", "Relay2")
|
||||||
|
/// - Returns: The connected RelayConnection instance
|
||||||
|
/// Connects to a relay and suspends until the connection is established (or a
/// 30-second timeout fires, which fails the test).
///
/// Fixes two races in the previous version:
/// 1. `connect()` was called before the continuation was installed, so a fast
///    `.connected` callback found `connectionContinuation` nil and the await
///    hung until the timeout.
/// 2. `isResumed` was mutated from both the WS callback thread and the timeout
///    task without synchronization, risking a double resume. All one-shot state
///    is now guarded by a lock.
///
/// - Parameters:
///   - url: The relay URL to connect to
///   - label: Optional label for logging (e.g., "Relay1", "Relay2")
/// - Returns: The connected RelayConnection instance
private func connectToRelay(url: RelayURL, label: String = "") async -> RelayConnection {
    // Guards the one-shot resume state shared between the WS callback thread
    // and the timeout task.
    let lock = NSLock()
    var connectionContinuation: CheckedContinuation<Void, Never>?
    var timeoutTask: Task<Void, Never>?
    var isResumed = false

    let relayConnection = RelayConnection(url: url, handleEvent: { _ in }, processUnverifiedWSEvent: { wsEvent in
        let prefix = label.isEmpty ? "" : "(\(label)) "
        switch wsEvent {
        case .connected:
            // Claim the one-shot resume under the lock; only the winner resumes.
            lock.lock()
            let shouldResume = !isResumed
            isResumed = true
            let continuation = connectionContinuation
            let pendingTimeout = timeoutTask
            lock.unlock()
            if shouldResume {
                pendingTimeout?.cancel()
                continuation?.resume()
            }
        case .message(let message):
            print("ENTITY_PRELOADER_TEST \(prefix): Received: \(message)")
        case .disconnected(let closeCode, let string):
            print("ENTITY_PRELOADER_TEST \(prefix): Disconnected: \(closeCode); \(String(describing: string))")
        case .error(let error):
            print("ENTITY_PRELOADER_TEST \(prefix): Received error: \(error)")
        }
    })

    // Wait for connection to be established with timeout
    await withCheckedContinuation { continuation in
        lock.lock()
        connectionContinuation = continuation
        lock.unlock()

        // Only start connecting once the continuation is installed; otherwise a
        // fast `.connected` callback would find it nil and the await would hang.
        relayConnection.connect()

        let task = Task {
            try? await Task.sleep(nanoseconds: 30_000_000_000) // 30 seconds
            lock.lock()
            let shouldResume = !isResumed
            isResumed = true
            lock.unlock()
            if shouldResume {
                print("ENTITY_PRELOADER_TEST \(label.isEmpty ? "" : "(\(label)) ")ERROR: Connection timeout after 30 seconds")
                continuation.resume()
                XCTFail("Connection to relay \(url) timed out after 30 seconds")
            }
        }
        lock.lock()
        timeoutTask = task
        lock.unlock()
    }

    return relayConnection
}
|
||||||
|
|
||||||
|
/// Sends events to a relay connection.
|
||||||
|
/// - Parameters:
|
||||||
|
/// - events: Array of NostrEvent to send
|
||||||
|
/// - connection: The RelayConnection to send events through
|
||||||
|
/// Publishes the given events over an established relay connection.
/// Signature stays `async throws` to match existing `try await` call sites,
/// even though sending itself is fire-and-forget.
/// - Parameters:
///   - events: Array of NostrEvent to send
///   - connection: The RelayConnection to send events through
private func sendEvents(_ events: [NostrEvent], to connection: RelayConnection) async throws {
    events.forEach { connection.send(.typical(.event($0))) }
}
|
||||||
|
|
||||||
|
/// Sets up a NostrNetworkManager with the specified relay URLs.
|
||||||
|
/// - Parameters:
|
||||||
|
/// - urls: Array of RelayURL to add to the manager
|
||||||
|
/// - ndb: The Ndb instance to use
|
||||||
|
/// - Returns: Configured and connected NostrNetworkManager
|
||||||
|
/// Builds a `NostrNetworkManager` wired to the given relays and waits for it
/// to come online.
/// - Parameters:
///   - urls: Relays to register in the user relay list (read/write).
///   - ndb: Backing NostrDB instance.
/// - Returns: A connected `NostrNetworkManager`.
private func setupNetworkManager(with urls: [RelayURL], ndb: Ndb) async throws -> NostrNetworkManager {
    let delegate = TestNetworkDelegate(ndb: ndb, keypair: test_keypair, bootstrapRelays: urls)
    let manager = NostrNetworkManager(delegate: delegate, addNdbToRelayPool: true)

    // Register each relay by hand since normal app initialization is bypassed.
    for relayUrl in urls {
        do {
            try await manager.userRelayList.insert(relay: .init(url: relayUrl, rwConfiguration: .readWrite), force: true)
        }
        catch {
            // NOTE(review): assumes `insert` uses typed throws exposing
            // `.relayAlreadyExists`; a duplicate registration is harmless here.
            switch error {
            case .relayAlreadyExists: continue
            default: throw error
            }
        }
    }

    await manager.connect()
    // Give the relay pool a moment to become ready.
    try await Task.sleep(for: .seconds(2))

    return manager
}
|
||||||
|
|
||||||
|
/// Stores events in NostrDB for testing purposes.
|
||||||
|
/// - Parameters:
|
||||||
|
/// - events: Array of NostrEvent to store in NDB
|
||||||
|
/// - ndb: The Ndb instance to store events in
|
||||||
|
/// Inserts events directly into NostrDB, failing the test on any insertion error.
/// - Parameters:
///   - events: Array of NostrEvent to store in NDB
///   - ndb: The Ndb instance to store events in
private func storeEventsInNdb(_ events: [NostrEvent], ndb: Ndb) {
    events.forEach { event in
        do { try ndb.add(event: event) }
        catch { XCTFail("Failed to store event in NDB: \(error)") }
    }
}
|
||||||
|
|
||||||
|
/// Creates a test note with the specified content and referenced pubkeys.
|
||||||
|
/// - Parameters:
|
||||||
|
/// - content: The content of the note
|
||||||
|
/// - keypair: The keypair to sign the note with
|
||||||
|
/// - referencedPubkeys: Pubkeys to add as p-tags
|
||||||
|
/// - Returns: A NostrEvent with the specified references
|
||||||
|
/// Builds a signed text note that carries one `p` tag per referenced pubkey.
/// - Parameters:
///   - content: The content of the note
///   - keypair: The keypair to sign the note with
///   - referencedPubkeys: Pubkeys to add as p-tags
/// - Returns: A NostrEvent with the specified references, or nil if signing fails
private func createNoteWithReferences(content: String, keypair: Keypair, referencedPubkeys: [Pubkey] = []) -> NostrEvent? {
    let pTags = referencedPubkeys.map { ["p", $0.hex()] }
    return NostrEvent(content: content, keypair: keypair, tags: pTags)
}
|
||||||
|
|
||||||
|
/// Creates a metadata event for a given pubkey.
|
||||||
|
/// - Parameters:
|
||||||
|
/// - keypair: The keypair for the profile
|
||||||
|
/// - displayName: Display name for the profile
|
||||||
|
/// - Returns: A metadata NostrEvent
|
||||||
|
/// Builds a kind-0 (metadata) event describing a profile.
///
/// The metadata JSON is produced via `JSONSerialization` instead of string
/// interpolation so display names containing quotes, backslashes, or other
/// JSON-significant characters still yield valid JSON.
/// - Parameters:
///   - keypair: The keypair for the profile
///   - displayName: Display name for the profile
/// - Returns: A metadata NostrEvent, or nil if serialization or signing fails
private func createMetadataEvent(keypair: Keypair, displayName: String) -> NostrEvent? {
    let fields: [String: String] = [
        "name": displayName,
        "display_name": displayName,
        "about": "Test user"
    ]
    guard let data = try? JSONSerialization.data(withJSONObject: fields),
          let metadata = String(data: data, encoding: .utf8) else {
        return nil
    }
    return NostrEvent(content: metadata, keypair: keypair, kind: 0)
}
|
||||||
|
|
||||||
|
// MARK: - Test Cases
|
||||||
|
|
||||||
|
/// Test that preloading with .noPreloading strategy does not trigger metadata fetching
|
||||||
|
/// Verifies that `.noPreloading` leaves author metadata out of NDB even though
/// the relay has it available.
func testNoPreloadingStrategy() async throws {
    // Given: a relay holding a note plus metadata for its author
    let relay = try await setupRelay(port: 9100)
    let relayUrl = RelayURL(await relay.url().description)!

    let authorKeypair = generate_new_keypair().to_keypair()
    let note = NostrEvent(content: "Test note", keypair: authorKeypair)!
    let metadata = createMetadataEvent(keypair: authorKeypair, displayName: "Test Author")!

    let relayConnection = await connectToRelay(url: relayUrl)
    try await sendEvents([note, metadata], to: relayConnection)

    let ndb = await generate_test_damus_state(mock_profile_info: nil).ndb
    let networkManager = try await setupNetworkManager(with: [relayUrl], ndb: ndb)

    let receivedNote = XCTestExpectation(description: "Received note")

    // When: streaming with the .noPreloading strategy
    Task {
        var sawFirstNote = false
        for await _ in networkManager.reader.streamIndefinitely(
            filters: [NostrFilter(kinds: [.text])],
            streamMode: .ndbAndNetworkParallel(networkOptimization: nil),
            preloadStrategy: .noPreloading
        ) {
            guard !sawFirstNote else { continue }
            sawFirstNote = true
            receivedNote.fulfill()
        }
    }

    // Then: the note itself arrives...
    await fulfillment(of: [receivedNote], timeout: 5.0)

    // ...and after giving any (erroneous) preloading a chance to run,
    try await Task.sleep(for: .seconds(2))

    // the author's metadata must still be absent from NDB.
    let hasMetadata = (try? ndb.lookup_profile(authorKeypair.pubkey, borrow: { pr in
        pr != nil
    })) ?? false
    XCTAssertFalse(hasMetadata, "Metadata should not be preloaded with .noPreloading strategy")
}
|
||||||
|
|
||||||
|
/// Test that preloading with .preload strategy fetches author metadata
|
||||||
|
/// Verifies that the `.preload` strategy fetches the note author's metadata
/// into NDB after the note is streamed.
func testPreloadAuthorMetadata() async throws {
    // Given: the relay holds the note and author metadata; NDB starts without it
    let relay = try await setupRelay(port: 9101)
    let relayUrl = RelayURL(await relay.url().description)!

    let authorKeypair = test_keypair
    let note = NostrEvent(content: "Test note", keypair: authorKeypair)!
    let metadata = createMetadataEvent(keypair: authorKeypair, displayName: "Test Author")!

    let relayConnection = await connectToRelay(url: relayUrl)
    try await sendEvents([note, metadata], to: relayConnection)

    let damus_state = await generate_test_damus_state(mock_profile_info: [:])
    let ndb = damus_state.ndb
    let networkManager = try await setupNetworkManager(with: [relayUrl], ndb: ndb)

    let receivedNote = XCTestExpectation(description: "Received note")
    let metadataPreloaded = XCTestExpectation(description: "Metadata preloaded")

    // When: streaming with the .preload strategy
    Task {
        var sawFirstNote = false
        for await _ in networkManager.reader.streamIndefinitely(
            filters: [NostrFilter(kinds: [.text])],
            streamMode: .ndbAndNetworkParallel(networkOptimization: nil),
            preloadStrategy: .preload
        ) {
            guard !sawFirstNote else { continue }
            sawFirstNote = true
            receivedNote.fulfill()

            // Poll NDB until the preloader has written the author's profile
            var attempt = 0
            while attempt < 20 {
                try? await Task.sleep(for: .milliseconds(100))
                let found = (try? ndb.lookup_profile(authorKeypair.pubkey, borrow: { pr in pr != nil })) ?? false
                if found {
                    metadataPreloaded.fulfill()
                    break
                }
                attempt += 1
            }
        }
    }

    // Then: the note arrives and the metadata lands in NDB
    await fulfillment(of: [receivedNote, metadataPreloaded], timeout: 30.0)
}
|
||||||
|
|
||||||
|
/// Test that preloading fetches metadata for all referenced pubkeys (p-tags)
|
||||||
|
/// Test that preloading fetches metadata for all referenced pubkeys (p-tags).
///
/// Fix: the previous version used `!expectation.isInverted` as an
/// "already fulfilled" guard — but `isInverted` is always false for these
/// expectations, so each one could be fulfilled repeatedly across polling
/// iterations. Local booleans now guarantee exactly one `fulfill()` each.
func testPreloadReferencedPubkeys() async throws {
    // Given: a note referencing two users, with metadata available for everyone
    let relay = try await setupRelay(port: 9102)
    let relayUrl = RelayURL(await relay.url().description)!

    let authorKeypair = test_keypair
    let user1Keypair = generate_new_keypair().to_keypair()
    let user2Keypair = generate_new_keypair().to_keypair()

    let note = createNoteWithReferences(
        content: "Mentioning users",
        keypair: authorKeypair,
        referencedPubkeys: [user1Keypair.pubkey, user2Keypair.pubkey]
    )!

    let authorMetadata = createMetadataEvent(keypair: authorKeypair, displayName: "Author")!
    let user1Metadata = createMetadataEvent(keypair: user1Keypair, displayName: "User 1")!
    let user2Metadata = createMetadataEvent(keypair: user2Keypair, displayName: "User 2")!

    let relayConnection = await connectToRelay(url: relayUrl)
    try await sendEvents([note, authorMetadata, user1Metadata, user2Metadata], to: relayConnection)

    let ndb = await generate_test_damus_state(mock_profile_info: nil).ndb
    let networkManager = try await setupNetworkManager(with: [relayUrl], ndb: ndb)

    let receivedNote = XCTestExpectation(description: "Received note")
    let authorMetadataPreloaded = XCTestExpectation(description: "Author metadata preloaded")
    let user1MetadataPreloaded = XCTestExpectation(description: "User 1 metadata preloaded")
    let user2MetadataPreloaded = XCTestExpectation(description: "User 2 metadata preloaded")

    // When: streaming with the .preload strategy
    Task {
        var noteReceived = false
        for await _ in networkManager.reader.streamIndefinitely(
            filters: [NostrFilter(kinds: [.text])],
            streamMode: .ndbAndNetworkParallel(networkOptimization: nil),
            preloadStrategy: .preload
        ) {
            if !noteReceived {
                noteReceived = true
                receivedNote.fulfill()

                // Track fulfillment locally so each expectation fires exactly once.
                var authorDone = false
                var user1Done = false
                var user2Done = false

                // Returns true once the given profile exists in NDB.
                func profileCached(_ pubkey: Pubkey) -> Bool {
                    (try? ndb.lookup_profile(pubkey, borrow: { pr in pr != nil })) ?? false
                }

                // Poll for all metadata to be preloaded
                for _ in 0..<30 {
                    try? await Task.sleep(for: .milliseconds(200))

                    if !authorDone, profileCached(authorKeypair.pubkey) {
                        authorDone = true
                        authorMetadataPreloaded.fulfill()
                    }
                    if !user1Done, profileCached(user1Keypair.pubkey) {
                        user1Done = true
                        user1MetadataPreloaded.fulfill()
                    }
                    if !user2Done, profileCached(user2Keypair.pubkey) {
                        user2Done = true
                        user2MetadataPreloaded.fulfill()
                    }

                    if authorDone && user1Done && user2Done {
                        break
                    }
                }
            }
        }
    }

    // Then: metadata for the author and all referenced users is preloaded
    await fulfillment(of: [receivedNote, authorMetadataPreloaded, user1MetadataPreloaded, user2MetadataPreloaded], timeout: 30.0)
}
|
||||||
|
|
||||||
|
/// Test that preloading batches requests efficiently when multiple notes arrive
|
||||||
|
/// Verifies that notes from several distinct authors are batched into a single
/// efficient metadata preload.
func testBatchingMultipleNotes() async throws {
    // Given: three notes from three different authors, plus their metadata
    let relay = try await setupRelay(port: 9103)
    let relayUrl = RelayURL(await relay.url().description)!

    let author1Keypair = test_keypair
    let author2Keypair = generate_new_keypair().to_keypair()
    let author3Keypair = generate_new_keypair().to_keypair()

    let note1 = NostrEvent(content: "Note 1", keypair: author1Keypair)!
    let note2 = NostrEvent(content: "Note 2", keypair: author2Keypair)!
    let note3 = NostrEvent(content: "Note 3", keypair: author3Keypair)!

    let metadata1 = createMetadataEvent(keypair: author1Keypair, displayName: "Author 1")!
    let metadata2 = createMetadataEvent(keypair: author2Keypair, displayName: "Author 2")!
    let metadata3 = createMetadataEvent(keypair: author3Keypair, displayName: "Author 3")!

    let relayConnection = await connectToRelay(url: relayUrl)
    try await sendEvents([note1, note2, note3, metadata1, metadata2, metadata3], to: relayConnection)

    let ndb = await generate_test_damus_state(mock_profile_info: nil).ndb
    let networkManager = try await setupNetworkManager(with: [relayUrl], ndb: ndb)

    let receivedAllNotes = XCTestExpectation(description: "Received all notes")
    receivedAllNotes.expectedFulfillmentCount = 3

    let allMetadataPreloaded = XCTestExpectation(description: "All metadata preloaded")

    // When: streaming the notes with the .preload strategy
    Task {
        var noteCount = 0
        for await _ in networkManager.reader.streamIndefinitely(
            filters: [NostrFilter(kinds: [.text])],
            streamMode: .ndbAndNetworkParallel(networkOptimization: nil),
            preloadStrategy: .preload
        ) {
            noteCount += 1
            receivedAllNotes.fulfill()

            guard noteCount >= 3 else { continue }

            print("🔍 All 3 notes received, polling for metadata preload...")
            print("📝 Author 1 pubkey: \(author1Keypair.pubkey.hex())")
            print("📝 Author 2 pubkey: \(author2Keypair.pubkey.hex())")
            print("📝 Author 3 pubkey: \(author3Keypair.pubkey.hex())")

            // Poll until everything is preloaded. The EntityPreloader batches
            // after 1 second and negentropy sync needs additional time.
            for iteration in 0..<60 {
                try? await Task.sleep(for: .milliseconds(500))

                let hasAuthor1 = (try? ndb.lookup_profile(author1Keypair.pubkey, borrow: { pr in pr != nil })) ?? false
                let hasAuthor2 = (try? ndb.lookup_profile(author2Keypair.pubkey, borrow: { pr in pr != nil })) ?? false
                let hasAuthor3 = (try? ndb.lookup_profile(author3Keypair.pubkey, borrow: { pr in pr != nil })) ?? false

                if iteration % 4 == 0 { // Log every 2 seconds
                    print("⏱️ Iteration \(iteration): Author1=\(hasAuthor1), Author2=\(hasAuthor2), Author3=\(hasAuthor3)")
                }

                if hasAuthor1 && hasAuthor2 && hasAuthor3 {
                    print("✅ All metadata preloaded!")
                    allMetadataPreloaded.fulfill()
                    break
                }
            }

            // Log the final NDB state regardless of outcome (aids debugging on timeout)
            let finalAuthor1 = (try? ndb.lookup_profile(author1Keypair.pubkey, borrow: { pr in pr != nil })) ?? false
            let finalAuthor2 = (try? ndb.lookup_profile(author2Keypair.pubkey, borrow: { pr in pr != nil })) ?? false
            let finalAuthor3 = (try? ndb.lookup_profile(author3Keypair.pubkey, borrow: { pr in pr != nil })) ?? false
            print("❌ Final state after timeout: Author1=\(finalAuthor1), Author2=\(finalAuthor2), Author3=\(finalAuthor3)")

            break
        }
    }

    // Then: all notes arrive and all metadata is batched and preloaded
    await fulfillment(of: [receivedAllNotes, allMetadataPreloaded], timeout: 60.0)
}
|
||||||
|
|
||||||
|
/// Test that preloading works correctly with negentropy streaming mode
|
||||||
|
/// Verifies that preloading also works when streaming uses the negentropy
/// network optimization and the note is already cached in NDB.
func testPreloadWithNegentropyStreaming() async throws {
    // Given: the relay has a note plus metadata; NDB already holds only the note
    let relay = try await setupRelay(port: 9104)
    let relayUrl = RelayURL(await relay.url().description)!

    let authorKeypair = test_keypair
    let note = NostrEvent(content: "Test note", keypair: authorKeypair)!
    let metadata = createMetadataEvent(keypair: authorKeypair, displayName: "Test Author")!

    let relayConnection = await connectToRelay(url: relayUrl)
    try await sendEvents([note, metadata], to: relayConnection)

    let ndb = await generate_test_damus_state(mock_profile_info: nil).ndb
    storeEventsInNdb([note], ndb: ndb)

    let networkManager = try await setupNetworkManager(with: [relayUrl], ndb: ndb)

    let receivedNoteFromNdb = XCTestExpectation(description: "Received note from NDB")
    let metadataPreloaded = XCTestExpectation(description: "Metadata preloaded")

    // When: streaming with negentropy network optimization and preloading on
    Task {
        var sawFirstNote = false
        for await _ in networkManager.reader.streamIndefinitely(
            filters: [NostrFilter(kinds: [.text])],
            streamMode: .ndbAndNetworkParallel(networkOptimization: .negentropy),
            preloadStrategy: .preload
        ) {
            guard !sawFirstNote else { continue }
            sawFirstNote = true
            receivedNoteFromNdb.fulfill()

            // Poll until the preloader writes the author's profile into NDB
            var attempt = 0
            while attempt < 20 {
                try? await Task.sleep(for: .milliseconds(200))
                if (try? ndb.lookup_profile(authorKeypair.pubkey, borrow: { pr in pr != nil })) ?? false {
                    metadataPreloaded.fulfill()
                    break
                }
                attempt += 1
            }
        }
    }

    // Then: the cached note streams out of NDB and metadata arrives via preload
    await fulfillment(of: [receivedNoteFromNdb, metadataPreloaded], timeout: 30.0)
}
|
||||||
|
|
||||||
|
/// Test that preloading doesn't duplicate requests for the same pubkey
|
||||||
|
/// Verifies that several notes by the same author result in the author's
/// metadata being preloaded (deduplication of requests happens internally).
func testPreloadDeduplication() async throws {
    // Given: three notes by the same author, plus that author's metadata
    let relay = try await setupRelay(port: 9105)
    let relayUrl = RelayURL(await relay.url().description)!

    let authorKeypair = test_keypair

    let note1 = NostrEvent(content: "Note 1", keypair: authorKeypair)!
    let note2 = NostrEvent(content: "Note 2", keypair: authorKeypair)!
    let note3 = NostrEvent(content: "Note 3", keypair: authorKeypair)!
    let metadata = createMetadataEvent(keypair: authorKeypair, displayName: "Test Author")!

    let relayConnection = await connectToRelay(url: relayUrl)
    try await sendEvents([note1, note2, note3, metadata], to: relayConnection)

    let ndb = await generate_test_damus_state(mock_profile_info: nil).ndb
    let networkManager = try await setupNetworkManager(with: [relayUrl], ndb: ndb)

    let receivedAllNotes = XCTestExpectation(description: "Received all notes")
    receivedAllNotes.expectedFulfillmentCount = 3

    let metadataPreloaded = XCTestExpectation(description: "Metadata preloaded")

    // When: streaming the author's notes with preloading enabled
    Task {
        var noteCount = 0
        for await _ in networkManager.reader.streamIndefinitely(
            filters: [NostrFilter(kinds: [.text])],
            streamMode: .ndbAndNetworkParallel(networkOptimization: nil),
            preloadStrategy: .preload
        ) {
            noteCount += 1
            receivedAllNotes.fulfill()

            guard noteCount == 3 else { continue }

            // Poll until the (single, deduplicated) preload lands in NDB
            var attempt = 0
            while attempt < 20 {
                try? await Task.sleep(for: .milliseconds(200))
                if (try? ndb.lookup_profile(authorKeypair.pubkey, borrow: { pr in pr != nil })) ?? false {
                    metadataPreloaded.fulfill()
                    break
                }
                attempt += 1
            }
        }
    }

    // Then: all notes arrive and the metadata is fetched
    await fulfillment(of: [receivedAllNotes, metadataPreloaded], timeout: 10.0)

    // Double-check the profile actually landed in NDB
    let hasProfile = (try? ndb.lookup_profile(authorKeypair.pubkey, borrow: { pr in pr != nil })) ?? false
    XCTAssertTrue(hasProfile, "Metadata should be preloaded")
}
|
||||||
|
|
||||||
|
/// Test preloading with streamExistingEvents (which stops at EOSE)
|
||||||
|
/// Verifies that preloading still completes when the stream terminates at EOSE
/// (`streamExistingEvents`), i.e. preloads survive the end of the source stream.
func testPreloadWithStreamExistingEvents() async throws {
    // Given: a relay holding one note and its author's metadata
    let relay = try await setupRelay(port: 9106)
    let relayUrl = RelayURL(await relay.url().description)!

    let authorKeypair = test_keypair
    let note = NostrEvent(content: "Test note", keypair: authorKeypair)!
    let metadata = createMetadataEvent(keypair: authorKeypair, displayName: "Test Author")!

    let relayConnection = await connectToRelay(url: relayUrl)
    try await sendEvents([note, metadata], to: relayConnection)

    let ndb = await generate_test_damus_state(mock_profile_info: nil).ndb
    let networkManager = try await setupNetworkManager(with: [relayUrl], ndb: ndb)

    let receivedNote = XCTestExpectation(description: "Received note")
    let streamEnded = XCTestExpectation(description: "Stream ended at EOSE")
    let metadataPreloaded = XCTestExpectation(description: "Metadata preloaded")

    // When: streaming only existing events, with preloading enabled
    Task {
        var sawFirstNote = false
        for await _ in networkManager.reader.streamExistingEvents(
            filters: [NostrFilter(kinds: [.text])],
            streamMode: .ndbAndNetworkParallel(networkOptimization: nil),
            preloadStrategy: .preload
        ) {
            if !sawFirstNote {
                sawFirstNote = true
                receivedNote.fulfill()
            }
        }
        streamEnded.fulfill()

        // After EOSE, poll for the preloaded metadata to land in NDB
        var attempt = 0
        while attempt < 20 {
            try? await Task.sleep(for: .milliseconds(200))
            if (try? ndb.lookup_profile(authorKeypair.pubkey, borrow: { pr in pr != nil })) ?? false {
                metadataPreloaded.fulfill()
                break
            }
            attempt += 1
        }
    }

    // Then: the note arrives, the stream terminates, and metadata is preloaded
    await fulfillment(of: [receivedNote, streamEnded, metadataPreloaded], timeout: 30.0)
}
|
||||||
|
|
||||||
|
// MARK: - Follow Pack Race Condition Tests
|
||||||
|
|
||||||
|
/// Creates a follow pack event (kind 39089) with the specified users.
|
||||||
|
///
|
||||||
|
/// - Parameters:
|
||||||
|
/// - authorKeypair: The keypair of the follow pack creator
|
||||||
|
/// - title: Title of the follow pack
|
||||||
|
/// - packId: Unique identifier for the pack (d-tag)
|
||||||
|
/// - userPubkeys: Array of pubkeys to include in the pack
|
||||||
|
/// - Returns: A NostrEvent representing the follow pack
|
||||||
|
/// Builds a follow pack event (`NostrKind.follow_list`) containing the given users.
/// - Parameters:
///   - authorKeypair: The keypair of the follow pack creator
///   - title: Title of the follow pack
///   - packId: Unique identifier for the pack (d-tag)
///   - userPubkeys: Array of pubkeys to include in the pack
/// - Returns: A NostrEvent representing the follow pack, or nil if signing fails
private func createFollowPackEvent(
    authorKeypair: Keypair,
    title: String,
    packId: String,
    userPubkeys: [Pubkey]
) -> NostrEvent? {
    let headerTags: [[String]] = [
        ["title", title],
        ["d", packId],
        ["description", "Test follow pack for preloader testing"]
    ]
    let memberTags = userPubkeys.map { ["p", $0.hex()] }
    return NostrEvent(
        content: "",
        keypair: authorKeypair,
        kind: NostrKind.follow_list.rawValue,
        tags: headerTags + memberTags
    )
}
|
||||||
|
|
||||||
|
/// Test that verifies the race condition: when a follow pack event is received,
|
||||||
|
/// the profiles listed in the pack are NOT yet available in NDB immediately after
|
||||||
|
/// the event is yielded to the stream consumer.
|
||||||
|
///
|
||||||
|
/// This test demonstrates the timing issue where views would render before
|
||||||
|
/// profile metadata has been preloaded.
|
||||||
|
func testFollowPackProfilesNotAvailableImmediately() async throws {
|
||||||
|
// Given: A relay with a follow pack event containing 80 users, and metadata for all users
|
||||||
|
let relay = try await setupRelay(port: 9107)
|
||||||
|
let relayUrl = RelayURL(await relay.url().description)!
|
||||||
|
|
||||||
|
// Create 65 users with their metadata
|
||||||
|
let allUserKeypairs = (0..<65).map { _ in generate_new_keypair().to_keypair() }
|
||||||
|
|
||||||
|
// Create metadata events for all users
|
||||||
|
let metadataEvents = allUserKeypairs.map { keypair in
|
||||||
|
createMetadataEvent(keypair: keypair, displayName: "User \(keypair.pubkey.hex().prefix(8))")!
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a follow pack event with p-tags for all users
|
||||||
|
let packAuthorKeypair = test_keypair
|
||||||
|
let followPackEvent = createFollowPackEvent(
|
||||||
|
authorKeypair: packAuthorKeypair,
|
||||||
|
title: "Test Follow Pack",
|
||||||
|
packId: "test-pack-\(UUID().uuidString)",
|
||||||
|
userPubkeys: allUserKeypairs.map { $0.pubkey }
|
||||||
|
)!
|
||||||
|
|
||||||
|
// Also create metadata for the pack author
|
||||||
|
let packAuthorMetadata = createMetadataEvent(keypair: packAuthorKeypair, displayName: "Pack Author")!
|
||||||
|
|
||||||
|
// Debug: verify the follow pack event has all p-tags
|
||||||
|
let pTagCount = Array(followPackEvent.referenced_pubkeys).count
|
||||||
|
print("🔍 DEBUG: Follow pack event has \(pTagCount) referenced pubkeys (expected 80)")
|
||||||
|
|
||||||
|
// Send all events to the relay
|
||||||
|
let relayConnection = await connectToRelay(url: relayUrl)
|
||||||
|
print("🔍 DEBUG: Sending \(metadataEvents.count + 2) events to relay (1 follow pack + 1 author metadata + \(metadataEvents.count) user metadata)")
|
||||||
|
try await sendEvents([followPackEvent, packAuthorMetadata] + metadataEvents, to: relayConnection)
|
||||||
|
|
||||||
|
// Give the relay more time to store all events - 82 events need time to be processed
|
||||||
|
try await Task.sleep(for: .seconds(10))
|
||||||
|
print("🔍 DEBUG: Waited 3 seconds for relay to store events")
|
||||||
|
|
||||||
|
// NDB starts empty (no profiles)
|
||||||
|
let ndb = await generate_test_damus_state(mock_profile_info: nil).ndb
|
||||||
|
let networkManager = try await setupNetworkManager(with: [relayUrl], ndb: ndb)
|
||||||
|
|
||||||
|
// Track which profiles were available immediately vs after delay
|
||||||
|
var profilesAvailableImmediately: [Pubkey] = []
|
||||||
|
var profilesAvailableAfterDelay: [Pubkey] = []
|
||||||
|
|
||||||
|
let receivedFollowPack = XCTestExpectation(description: "Received follow pack event")
|
||||||
|
let checkedImmediateAvailability = XCTestExpectation(description: "Checked immediate availability")
|
||||||
|
let checkedDelayedAvailability = XCTestExpectation(description: "Checked delayed availability")
|
||||||
|
|
||||||
|
// When: Streaming follow pack events with .preload strategy
|
||||||
|
Task {
|
||||||
|
for await lender in networkManager.reader.streamExistingEvents(
|
||||||
|
filters: [NostrFilter(kinds: [.follow_list])],
|
||||||
|
streamMode: .ndbAndNetworkParallel(networkOptimization: nil),
|
||||||
|
preloadStrategy: .preload
|
||||||
|
) {
|
||||||
|
receivedFollowPack.fulfill()
|
||||||
|
|
||||||
|
// IMMEDIATELY check which profiles are available (simulates view rendering)
|
||||||
|
// This is what happens when FollowPackView renders its profile pictures
|
||||||
|
for keypair in allUserKeypairs {
|
||||||
|
let hasProfile = (try? ndb.lookup_profile(keypair.pubkey, borrow: { pr in pr != nil })) ?? false
|
||||||
|
if hasProfile {
|
||||||
|
profilesAvailableImmediately.append(keypair.pubkey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
checkedImmediateAvailability.fulfill()
|
||||||
|
|
||||||
|
print("📊 IMMEDIATE CHECK: \(profilesAvailableImmediately.count)/\(allUserKeypairs.count) profiles available")
|
||||||
|
|
||||||
|
// Now wait for preloading to complete (up to 5 seconds)
|
||||||
|
// The preloader batches after 1 second, plus time for negentropy sync
|
||||||
|
for iteration in 0..<25 {
|
||||||
|
try? await Task.sleep(for: .milliseconds(200))
|
||||||
|
|
||||||
|
var allLoaded = true
|
||||||
|
for keypair in allUserKeypairs {
|
||||||
|
let hasProfile = (try? ndb.lookup_profile(keypair.pubkey, borrow: { pr in pr != nil })) ?? false
|
||||||
|
if !hasProfile {
|
||||||
|
allLoaded = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iteration % 5 == 0 {
|
||||||
|
var loadedCount = 0
|
||||||
|
for keypair in allUserKeypairs {
|
||||||
|
if (try? ndb.lookup_profile(keypair.pubkey, borrow: { pr in pr != nil })) ?? false {
|
||||||
|
loadedCount += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
print("⏱️ Iteration \(iteration): \(loadedCount)/\(allUserKeypairs.count) profiles loaded")
|
||||||
|
}
|
||||||
|
|
||||||
|
if allLoaded { break }
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check which profiles are available after the delay
|
||||||
|
for keypair in allUserKeypairs {
|
||||||
|
let hasProfile = (try? ndb.lookup_profile(keypair.pubkey, borrow: { pr in pr != nil })) ?? false
|
||||||
|
if hasProfile {
|
||||||
|
profilesAvailableAfterDelay.append(keypair.pubkey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
checkedDelayedAvailability.fulfill()
|
||||||
|
|
||||||
|
print("📊 DELAYED CHECK: \(profilesAvailableAfterDelay.count)/\(allUserKeypairs.count) profiles available")
|
||||||
|
|
||||||
|
break // Only process first event
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
await fulfillment(of: [receivedFollowPack, checkedImmediateAvailability, checkedDelayedAvailability], timeout: 60.0)
|
||||||
|
|
||||||
|
// Then: This assertion demonstrates the race condition
|
||||||
|
// In the current implementation, profiles are NOT available immediately
|
||||||
|
// but SHOULD be available after the preloader has time to fetch them
|
||||||
|
|
||||||
|
print("📊 FINAL RESULTS:")
|
||||||
|
print(" - Profiles available immediately: \(profilesAvailableImmediately.count)/\(allUserKeypairs.count)")
|
||||||
|
print(" - Profiles available after delay: \(profilesAvailableAfterDelay.count)/\(allUserKeypairs.count)")
|
||||||
|
|
||||||
|
// This is the key assertion that demonstrates the bug:
|
||||||
|
// If preloading worked synchronously (or the view waited for preloading),
|
||||||
|
// all profiles would be available immediately.
|
||||||
|
// But with the current async preloading, they're not.
|
||||||
|
XCTAssertLessThan(
|
||||||
|
profilesAvailableImmediately.count,
|
||||||
|
allUserKeypairs.count,
|
||||||
|
"Bug demonstration: Profiles should NOT all be available immediately due to async preloading race condition. " +
|
||||||
|
"Got \(profilesAvailableImmediately.count)/\(allUserKeypairs.count) immediately available."
|
||||||
|
)
|
||||||
|
|
||||||
|
// But they should eventually be available after preloading completes
|
||||||
|
XCTAssertEqual(
|
||||||
|
profilesAvailableAfterDelay.count,
|
||||||
|
allUserKeypairs.count,
|
||||||
|
"After preloading delay, all profiles should be available. " +
|
||||||
|
"Got \(profilesAvailableAfterDelay.count)/\(allUserKeypairs.count) after delay."
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Test that measures the actual delay between receiving a follow pack event
/// and having all profiles available in NDB.
///
/// This helps quantify the user-perceived latency of profile "pop-in".
func testFollowPackPreloadingDelay() async throws {
    // Given: a relay seeded with a follow pack event and kind-0 metadata for each member
    let relay = try await setupRelay(port: 9108)
    let relayUrl = RelayURL(await relay.url().description)!

    // Three members keep this timing measurement small and fast
    let memberKeypairs = [
        generate_new_keypair().to_keypair(),
        generate_new_keypair().to_keypair(),
        generate_new_keypair().to_keypair()
    ]

    let memberMetadata = memberKeypairs.map { keypair in
        createMetadataEvent(keypair: keypair, displayName: "User \(keypair.pubkey.hex().prefix(8))")!
    }

    let packAuthorKeypair = test_keypair
    let followPackEvent = createFollowPackEvent(
        authorKeypair: packAuthorKeypair,
        title: "Timing Test Pack",
        packId: "timing-test-\(UUID().uuidString)",
        userPubkeys: memberKeypairs.map { $0.pubkey }
    )!

    let relayConnection = await connectToRelay(url: relayUrl)
    try await sendEvents([followPackEvent] + memberMetadata, to: relayConnection)

    // NDB starts empty, so every profile must arrive via the preloader
    let ndb = await generate_test_damus_state(mock_profile_info: nil).ndb
    let networkManager = try await setupNetworkManager(with: [relayUrl], ndb: ndb)

    var receivedAt: Date?
    var fullyLoadedAt: Date?

    let testCompleted = XCTestExpectation(description: "Test completed")

    // When: streaming the follow pack and timing how long profiles take to land in NDB
    Task {
        // True once every member's profile can be looked up in NDB
        func allProfilesPresent() -> Bool {
            memberKeypairs.allSatisfy { keypair in
                (try? ndb.lookup_profile(keypair.pubkey, borrow: { pr in pr != nil })) ?? false
            }
        }

        for await _ in networkManager.reader.streamExistingEvents(
            filters: [NostrFilter(kinds: [.follow_list])],
            streamMode: .ndbAndNetworkParallel(networkOptimization: nil),
            preloadStrategy: .preload
        ) {
            receivedAt = Date()

            // Poll every 100ms, allowing the preloader up to 10 seconds to finish
            for _ in 0..<100 {
                try? await Task.sleep(for: .milliseconds(100))

                if allProfilesPresent() {
                    fullyLoadedAt = Date()
                    break
                }
            }

            testCompleted.fulfill()
            break // only the first event matters for this measurement
        }
    }

    await fulfillment(of: [testCompleted], timeout: 30.0)

    // Then: both timestamps must exist before the delay can be computed
    guard let receivedTime = receivedAt else {
        XCTFail("Event was never received")
        return
    }

    guard let loadedTime = fullyLoadedAt else {
        XCTFail("Profiles were never fully loaded within timeout")
        return
    }

    let delay = loadedTime.timeIntervalSince(receivedTime)
    print("⏱️ PRELOAD DELAY MEASUREMENT:")
    print(" - Event received at: \(receivedTime)")
    print(" - All profiles loaded at: \(loadedTime)")
    print(" - Total delay: \(String(format: "%.2f", delay)) seconds")

    // But it shouldn't take too long (within reasonable network latency)
    XCTAssertLessThan(
        delay,
        3.0,
        "Preloading should complete within a reasonable time. Actual delay: \(delay)s"
    )
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MARK: - Test Doubles
|
||||||
|
|
||||||
|
/// Test delegate for NostrNetworkManager that provides minimal configuration for testing
private final class TestNetworkDelegate: NostrNetworkManager.Delegate {
    // Core dependencies injected by the test
    var ndb: Ndb
    var keypair: Keypair
    var bootstrapRelays: [RelayURL]

    // Optional state that these tests never populate
    var latestRelayListEventIdHex: String?
    var latestContactListEvent: NostrEvent?
    var nwcWallet: WalletConnectURL?

    // Feature flags held at their defaults for testing
    var developerMode: Bool = false
    var experimentalLocalRelayModelSupport: Bool = false

    // Collaborators constructed internally from the injected identity
    var relayModelCache = RelayModelCache()
    var relayFilters: RelayFilters

    /// Creates a delegate wired to the given database, identity, and bootstrap relay list.
    init(ndb: Ndb, keypair: Keypair, bootstrapRelays: [RelayURL]) {
        self.ndb = ndb
        self.keypair = keypair
        self.bootstrapRelays = bootstrapRelays
        self.relayFilters = RelayFilters(our_pubkey: keypair.pubkey)
    }
}
|
||||||
@@ -224,7 +224,7 @@ private final class MockSubscriptionManager: NostrNetworkManager.SubscriptionMan
|
|||||||
super.init(pool: pool, ndb: ndb, experimentalLocalRelayModelSupport: false)
|
super.init(pool: pool, ndb: ndb, experimentalLocalRelayModelSupport: false)
|
||||||
}
|
}
|
||||||
|
|
||||||
override func streamIndefinitely(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, streamMode: NostrNetworkManager.StreamMode? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
override func streamIndefinitely(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, streamMode: NostrNetworkManager.StreamMode? = nil, preloadStrategy: NostrNetworkManager.PreloadStrategy? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
||||||
let lenders = queuedLenders
|
let lenders = queuedLenders
|
||||||
return AsyncStream { continuation in
|
return AsyncStream { continuation in
|
||||||
lenders.forEach { continuation.yield($0) }
|
lenders.forEach { continuation.yield($0) }
|
||||||
|
|||||||
9
justfile
9
justfile
@@ -1,5 +1,10 @@
|
|||||||
build:
|
build:
|
||||||
xcodebuild -scheme damus -sdk iphoneos -destination 'platform=iOS Simulator,OS=26.0,name=iPhone 16e' -quiet | xcbeautify --quieter
|
xcodebuild -scheme damus -sdk iphoneos -destination 'platform=iOS Simulator,OS=26.0,name=iPhone 16e' -quiet | xcbeautify --quieter
|
||||||
|
|
||||||
test:
|
test test_name="":
|
||||||
xcodebuild test -scheme damus -destination 'platform=iOS Simulator,OS=26.0,name=iPhone 16e' | xcbeautify
|
#!/usr/bin/env bash
|
||||||
|
if [ -n "{{test_name}}" ]; then
|
||||||
|
xcodebuild test -scheme damus -destination 'platform=iOS Simulator,OS=26.0,name=iPhone 16e' -only-testing:{{test_name}} | xcbeautify --quieter
|
||||||
|
else
|
||||||
|
xcodebuild test -scheme damus -destination 'platform=iOS Simulator,OS=26.0,name=iPhone 16e' | xcbeautify --quieter
|
||||||
|
fi
|
||||||
|
|||||||
@@ -150,6 +150,7 @@ struct UnownedNdbNote: ~Copyable {
|
|||||||
var createdAt: UInt32 { _ndbNote.created_at }
|
var createdAt: UInt32 { _ndbNote.created_at }
|
||||||
var id: NoteId { _ndbNote.id }
|
var id: NoteId { _ndbNote.id }
|
||||||
var sig: Signature { _ndbNote.sig }
|
var sig: Signature { _ndbNote.sig }
|
||||||
|
var referenced_pubkeys: References<Pubkey> { _ndbNote.referenced_pubkeys }
|
||||||
|
|
||||||
func toOwned() -> NdbNote {
|
func toOwned() -> NdbNote {
|
||||||
return _ndbNote.to_owned()
|
return _ndbNote.to_owned()
|
||||||
|
|||||||
Reference in New Issue
Block a user