Compare commits: nip-17-dms...master (443 commits)
.github/pull_request_template.md (vendored, 31 changes)
@@ -4,8 +4,37 @@ _[Please provide a summary of the changes in this PR.]_

## Checklist

<!--
CHOOSE YOUR CHECKLIST:
- If this is an EXPERIMENTAL DAMUS LABS FEATURE, follow the "Experimental Feature Checklist" below and DELETE the "Standard PR Checklist"
- If this is a STANDARD PR, follow the "Standard PR Checklist" below and DELETE the "Experimental Feature Checklist"
-->

### Experimental Feature Checklist

<!-- DELETE THIS SECTION if this is a standard PR -->

> [!TIP]
> This Pull Request is an experimental feature for Damus Labs, and follows a fast-track review process.
> The overall requirements are lowered and the review process is not as strict as usual. However, the feature will only be available for Purple users who opt-in.

- [ ] I have read (or I am familiar with) the [Contribution Guidelines](../docs/CONTRIBUTING.md).
- [ ] I have done some testing on the changes in this PR to ensure it is at least functional.
- [ ] I made sure that this new feature is only available when the user opts-in from the Damus Labs screen, and does not affect the rest of the app when turned off.
- [ ] My PR is either small, or I have split it into smaller logical commits that are easier to review.
- [ ] I have added the signoff line to all my commits. See [Signing off your work](../docs/CONTRIBUTING.md#sign-your-work---the-developers-certificate-of-origin).
- [ ] I have added an appropriate changelog entry to my commit in this PR. See [Adding changelog entries](../docs/CONTRIBUTING.md#add-changelog-changed-changelog-fixed-etc).
  - Example changelog entry: `Changelog-Added: Added experimental feature <X> to Damus Labs`

### Standard PR Checklist

<!-- DELETE THIS SECTION if this is an experimental Damus Labs feature -->

- [ ] I have read (or I am familiar with) the [Contribution Guidelines](../docs/CONTRIBUTING.md)
- [ ] I have tested the changes in this PR
- [ ] I have profiled the changes to ensure there are no performance regressions, or I do not need to profile the changes.
  - Utilize Xcode profiler to measure performance impact of code changes. See https://developer.apple.com/videos/play/wwdc2025/306
  - If not needed, provide reason:
- [ ] I have opened or referred to an existing github issue related to this change.
- [ ] My PR is either small, or I have split it into smaller logical commits that are easier to review
- [ ] I have added the signoff line to all my commits. See [Signing off your work](../docs/CONTRIBUTING.md#sign-your-work---the-developers-certificate-of-origin)
@@ -34,4 +63,4 @@ _Please provide a test report for the changes in this PR. You can use the templa

## Other notes

_[Please provide any other information that you think is relevant to this PR.]_
_[Please provide any other information that you think is relevant to this PR.]_
AGENTS.md (new file, 47 lines)
@@ -0,0 +1,47 @@
# Agents

## Damus Overview

Damus is an iOS client built around a local relay model ([damus-io/damus#3204](https://github.com/damus-io/damus/pull/3204)) to keep interactions snappy and resilient. The app operates on `nostrdb` ([source](https://github.com/damus-io/damus/tree/master/nostrdb)), and agents working on Damus should maximize usage of `nostrdb` facilities whenever possible.

## Codebase Layout

- `damus/` contains the SwiftUI app. Key subdirectories: `Core` (protocol, storage, networking, nostr primitives), `Features` (feature-specific flows like Timeline, Wallet, Purple), `Shared` (reusable UI components and utilities), `Models`, and localized resources (`*.lproj`, `en-US.xcloc`).
- `nostrdb/` hosts the embedded database. Swift bindings (`Ndb.swift`, iterators) wrap a C/LMDB core; prefer these abstractions when working with persistence or queries.
- `damus-c/` bridges C helpers (e.g., WASM runner) into Swift; check `damus-Bridging-Header.h` before adding new bridges.
- `nostrscript/` contains AssemblyScript sources compiled to WASM via the top-level `Makefile`.
- Tests live in `damusTests/` (unit/snapshot coverage) and `damusUITests/` (UI smoke tests). Keep them running before submitting changes.
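For example, persistence reads should go through the borrow-scoped `Ndb` lookups rather than holding unsafe unowned values. The sketch below mirrors the `lookup_profile(_:borrow:)` pattern introduced in the notification-extension changes later in this compare; the wrapper function itself and its fallback behavior are editorial assumptions, shown only to illustrate the shape of the call:

```swift
// Sketch: resolve a user's display name through nostrdb's borrow-scoped lookup.
// The profile record is only valid inside the closure, so copy out what you need.
func displayName(for pubkey: Pubkey, ndb: Ndb) -> String {
    let profile = try? ndb.lookup_profile(pubkey, borrow: { record in
        switch record {
        case .some(let rec): return rec.profile
        case .none: return nil
        }
    })
    // Falls back to a pubkey-derived name when no profile is cached locally.
    return Profile.displayName(profile: profile, pubkey: pubkey).displayName
}
```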
## Development Workflow

- Use `just build` / `just test` for simulator builds and the primary test suite (requires `xcbeautify`). Update or add `just` recipes if new repeatable workflows emerge.
- Xcode project is `damus.xcodeproj`; the main scheme is `damus`. Ensure new targets or resources integrate cleanly with this scheme.
- Rebuild WASM helpers with `make` when touching `nostrscript/` sources.
- Follow `docs/DEV_TIPS.md` for debugging (enabling Info logging, staging push notification settings) and keep tips updated when discovering new workflows.

## Testing Expectations

- Provide a concrete test report in each PR (see `.github/pull_request_template.md`). Document devices, OS versions, and scenarios exercised.
- Add or update unit tests in `damusTests/` alongside feature changes, especially when touching parsing, storage, or replay logic.
- UI regressions should include `damusUITests/` coverage or rationale when automation is impractical.
- Snapshot fixtures under `damusTests/__Snapshots__` must be regenerated deliberately; explain updates in commit messages.
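As a shape for such a unit test, a minimal `XCTest` case might look like the sketch below; the test class and the `parse_note_content` helper it calls are hypothetical stand-ins for whatever parsing entry point a change actually touches:

```swift
import XCTest
@testable import damus

final class ContentParsingTests: XCTestCase {
    /// Hypothetical example: a hashtag in note content should be recognized as a hashtag block.
    func testHashtagIsParsed() throws {
        // `parse_note_content` and the `.hashtag` case are placeholders for the real API under test.
        let blocks = parse_note_content("hello #nostr")
        XCTAssertTrue(blocks.contains { block in
            if case .hashtag(let tag) = block { return tag == "nostr" }
            return false
        })
    }
}
```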
## Contribution Standards

- Sign all commits (`git commit -s`) and include appropriate `Changelog-*`, `Closes:`, or `Fixes:` tags as described in `docs/CONTRIBUTING.md`.
- Keep patches scoped: one logical change per commit, ensuring the app builds and runs after each step.
- Favor Swift-first solutions that lean on `nostrdb` types (`Ndb`, `NdbNote`, iterators) before introducing new storage mechanisms.
- Update documentation when workflows change, especially this file, `README.md`, or developer notes.

## Agent Requirements

1. Code should tend toward simplicity.
2. Commits should be logically distinct.
3. Commits should be standalone.
4. Code should be human readable.
5. Code should be human reviewable.
6. Ensure docstring coverage for any code added or modified.
7. Review and follow `pull_request_template.md` when creating PRs for iOS Damus.
8. Ensure nevernesting: favor early returns and guard clauses over deeply nested conditionals; simplify control flow by exiting early instead of wrapping logic in multiple layers of `if` statements.
9. Before proposing changes, please **review and analyze if a change or upgrade to nostrdb** is beneficial to the change at hand.
10. **Never block the main thread**: All network requests, database queries, and expensive computations must run on background threads/queues. Use `Task { }`, `DispatchQueue.global()`, or Swift concurrency (`async/await`) appropriately. UI updates must dispatch back to `@MainActor`. Test for hangs and freezes before submitting.
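A small sketch of requirements 8 and 10 taken together; the view model, its published `zaps` array, and the `fetchZaps` helper are hypothetical, and only the early-return and threading shape is the point:

```swift
import Combine
import Foundation

@MainActor
final class ZapListModel: ObservableObject {
    @Published var zaps: [String] = []

    /// Loads zap summaries for a pubkey without blocking the main thread.
    func load(pubkeyHex: String?) {
        // Nevernesting: guard and return early instead of nesting the body in an `if`.
        guard let pubkeyHex, !pubkeyHex.isEmpty else { return }

        Task {
            // The awaited call hops off the main actor (nonisolated async work),
            // then execution resumes on @MainActor to publish the result.
            let fetched = await Self.fetchZaps(for: pubkeyHex)
            self.zaps = fetched
        }
    }

    /// Hypothetical background fetch; a real implementation would query nostrdb or relays.
    nonisolated static func fetchZaps(for pubkeyHex: String) async -> [String] {
        ["1000 sats from \(pubkeyHex.prefix(8))…"]
    }
}
```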
CHANGELOG.md (51 changes)
@@ -1,3 +1,54 @@
## [1.15] - 2025-07-11

**Note:** This version was only released on TestFlight, and never officially released on the App Store.

### Added

- Added new onboarding suggestions based on user-selected interests (Daniel D’Aquino)
- Added adjustable max budget setting for Coinos one-click wallets (Daniel D’Aquino)
- Added send feature to the wallet view (Daniel D’Aquino)
- Added popover tips to DMs and Notifications toolbars on Trusted Network button (Terry Yiu)
- Added tip in threads to inform users what trusted network means (Terry Yiu)
- Added web of trust reply sorting in threads to mitigate spam (Terry Yiu)
- Added follow list kind 39089 (ericholguin)
- Added follow pack preview (ericholguin)
- Added follow pack timeline to Universe View (ericholguin)
- Added NIP-05 favicon to profile names and NIP-05 web of trust feed (Terry Yiu)
- Display uploading indicator in post view (Swift Coder)

### Changed

- Improved the image sizing behavior on the image carousel for a smoother experience (Daniel D’Aquino)
- Handle npub correctly in draft notes (Askia Linder)
- Move users-section to be last in muted view (Askia Linder)
- Removed media from regular link previews if media is already being shown (Terry Yiu)
- Renamed Friends of Friends to Trusted Network (Terry Yiu)
- Added privacy-based redaction to nsec in key settings view (Terry Yiu)
- Added privacy-based redaction to wallet view (Terry Yiu)
- Renamed Bitcoin Beach wallet to Blink (Terry Yiu)

### Fixed

- Fixed #nsfw tag filtering to be case insensitive (Terry Yiu)
- Fixed stretchy banner header in Edit profile (Swift)
- Fixed note rendering to include regular link previews with media removed when media previews are disabled (Terry Yiu)
- Improve error handling on wallet send feature (Daniel D’Aquino)
- Fixed issue where the text "??" would appear on the balance while loading (Daniel D’Aquino)
- Hide end previewables when hashtags are present (Terry Yiu)
- Fixed wallet transactions to always show profile display name unless there is no pubkey (Terry Yiu)
- Fixed quotes view header alignment (Terry Yiu)

### Removed

- Removed hashtags in Universe View (ericholguin)

[1.15]: https://github.com/damus-io/damus/releases/tag/v1.15

## [1.14] - 2025-05-25

### Added
@@ -7,6 +7,7 @@

import Foundation

@MainActor
struct NotificationExtensionState: HeadlessDamusState {
    let ndb: Ndb
    let settings: UserSettingsStore
@@ -26,7 +26,7 @@ struct NotificationFormatter {
            content.title = NSLocalizedString("Someone posted a note", comment: "Title label for push notification where someone posted a note")
            content.body = event.content
            break
        case .deprecated_dm:
        case .dm:
            content.title = NSLocalizedString("New message", comment: "Title label for push notifications where a direct message was sent to the user")
            content.body = NSLocalizedString("(Contents are encrypted)", comment: "Label on push notification indicating that the contents of the message are encrypted")
            break
@@ -103,7 +103,7 @@ struct NotificationFormatter {
            content.title = Self.zap_notification_title(zap)
            content.body = Self.zap_notification_body(profiles: state.profiles, zap: zap)
            content.sound = UNNotificationSound.default
            content.userInfo = LossyLocalNotification(type: .zap, mention: .note(notify.event.id)).to_user_info()
            content.userInfo = LossyLocalNotification(type: .zap, mention: .init(nip19: .note(notify.event.id))).to_user_info()
            return (content, "myZapNotification")
        default:
            // The sync method should have taken care of this.
@@ -125,8 +125,7 @@ struct NotificationFormatter {
        let src = zap.request.ev
        let pk = zap.is_anon ? ANON_PUBKEY : src.pubkey

        let profile_txn = profiles.lookup(id: pk)
        let profile = profile_txn?.unsafeUnownedValue
        let profile = try? profiles.lookup(id: pk)
        let name = Profile.displayName(profile: profile, pubkey: pk).displayName.truncate(maxLength: 50)

        let sats = NSNumber(value: (Double(zap.invoice.amount) / 1000.0))
@@ -44,63 +44,61 @@ class NotificationService: UNNotificationServiceExtension {
|
||||
// Log that we got a push notification
|
||||
Log.debug("Got nostr event push notification from pubkey %s", for: .push_notifications, nostr_event.pubkey.hex())
|
||||
|
||||
guard let state = NotificationExtensionState() else {
|
||||
Log.debug("Failed to open nostrdb", for: .push_notifications)
|
||||
Task {
|
||||
guard let state = await NotificationExtensionState() else {
|
||||
Log.debug("Failed to open nostrdb", for: .push_notifications)
|
||||
|
||||
// Something failed to initialize so let's go for the next best thing
|
||||
guard let improved_content = NotificationFormatter.shared.format_message(event: nostr_event) else {
|
||||
// We cannot format this nostr event. Suppress notification.
|
||||
contentHandler(UNNotificationContent())
|
||||
// Something failed to initialize so let's go for the next best thing
|
||||
guard let improved_content = NotificationFormatter.shared.format_message(event: nostr_event) else {
|
||||
// We cannot format this nostr event. Suppress notification.
|
||||
contentHandler(UNNotificationContent())
|
||||
return
|
||||
}
|
||||
contentHandler(improved_content)
|
||||
return
|
||||
}
|
||||
contentHandler(improved_content)
|
||||
return
|
||||
}
|
||||
|
||||
let sender_profile = {
|
||||
let txn = state.ndb.lookup_profile(nostr_event.pubkey)
|
||||
let profile = txn?.unsafeUnownedValue?.profile
|
||||
let picture = ((profile?.picture.map { URL(string: $0) }) ?? URL(string: robohash(nostr_event.pubkey)))!
|
||||
return ProfileBuf(picture: picture,
|
||||
name: profile?.name,
|
||||
display_name: profile?.display_name,
|
||||
nip05: profile?.nip05)
|
||||
}()
|
||||
let sender_pubkey = nostr_event.pubkey
|
||||
let sender_profile = {
|
||||
let profile = try? state.profiles.lookup(id: nostr_event.pubkey)
|
||||
let picture = ((profile?.picture.map { URL(string: $0) }) ?? URL(string: robohash(nostr_event.pubkey)))!
|
||||
return ProfileBuf(picture: picture,
|
||||
name: profile?.name,
|
||||
display_name: profile?.display_name,
|
||||
nip05: profile?.nip05)
|
||||
}()
|
||||
let sender_pubkey = nostr_event.pubkey
|
||||
|
||||
// Don't show notification details that match mute list.
|
||||
// TODO: Remove this code block once we get notification suppression entitlement from Apple. It will be covered by the `guard should_display_notification` block
|
||||
if state.mutelist_manager.is_event_muted(nostr_event) {
|
||||
// We cannot really suppress muted notifications until we have the notification supression entitlement.
|
||||
// The best we can do if we ever get those muted notifications (which we generally won't due to server-side processing) is to obscure the details
|
||||
let content = UNMutableNotificationContent()
|
||||
content.title = NSLocalizedString("Muted event", comment: "Title for a push notification which has been muted")
|
||||
content.body = NSLocalizedString("This is an event that has been muted according to your mute list rules. We cannot suppress this notification, but we obscured the details to respect your preferences", comment: "Description for a push notification which has been muted, and explanation that we cannot suppress it")
|
||||
content.sound = UNNotificationSound.default
|
||||
contentHandler(content)
|
||||
return
|
||||
}
|
||||
// Don't show notification details that match mute list.
|
||||
// TODO: Remove this code block once we get notification suppression entitlement from Apple. It will be covered by the `guard should_display_notification` block
|
||||
if await state.mutelist_manager.is_event_muted(nostr_event) {
|
||||
// We cannot really suppress muted notifications until we have the notification supression entitlement.
|
||||
// The best we can do if we ever get those muted notifications (which we generally won't due to server-side processing) is to obscure the details
|
||||
let content = UNMutableNotificationContent()
|
||||
content.title = NSLocalizedString("Muted event", comment: "Title for a push notification which has been muted")
|
||||
content.body = NSLocalizedString("This is an event that has been muted according to your mute list rules. We cannot suppress this notification, but we obscured the details to respect your preferences", comment: "Description for a push notification which has been muted, and explanation that we cannot suppress it")
|
||||
content.sound = UNNotificationSound.default
|
||||
contentHandler(content)
|
||||
return
|
||||
}
|
||||
|
||||
guard should_display_notification(state: state, event: nostr_event, mode: .push) else {
|
||||
Log.debug("should_display_notification failed", for: .push_notifications)
|
||||
// We should not display notification for this event. Suppress notification.
|
||||
// contentHandler(UNNotificationContent())
|
||||
// TODO: We cannot really suppress until we have the notification supression entitlement. Show the raw notification
|
||||
contentHandler(request.content)
|
||||
return
|
||||
}
|
||||
guard await should_display_notification(state: state, event: nostr_event, mode: .push) else {
|
||||
Log.debug("should_display_notification failed", for: .push_notifications)
|
||||
// We should not display notification for this event. Suppress notification.
|
||||
// contentHandler(UNNotificationContent())
|
||||
// TODO: We cannot really suppress until we have the notification supression entitlement. Show the raw notification
|
||||
contentHandler(request.content)
|
||||
return
|
||||
}
|
||||
|
||||
guard let notification_object = generate_local_notification_object(from: nostr_event, state: state) else {
|
||||
Log.debug("generate_local_notification_object failed", for: .push_notifications)
|
||||
// We could not process this notification. Probably an unsupported nostr event kind. Suppress.
|
||||
// contentHandler(UNNotificationContent())
|
||||
// TODO: We cannot really suppress until we have the notification supression entitlement. Show the raw notification
|
||||
contentHandler(request.content)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
Task {
|
||||
guard let notification_object = generate_local_notification_object(ndb: state.ndb, from: nostr_event, state: state) else {
|
||||
Log.debug("generate_local_notification_object failed", for: .push_notifications)
|
||||
// We could not process this notification. Probably an unsupported nostr event kind. Suppress.
|
||||
// contentHandler(UNNotificationContent())
|
||||
// TODO: We cannot really suppress until we have the notification supression entitlement. Show the raw notification
|
||||
contentHandler(request.content)
|
||||
return
|
||||
}
|
||||
|
||||
let sender_dn = DisplayName(name: sender_profile.name, display_name: sender_profile.display_name, pubkey: sender_pubkey)
|
||||
guard let (improvedContent, _) = await NotificationFormatter.shared.format_message(displayName: sender_dn.displayName, notify: notification_object, state: state) else {
|
||||
|
||||
@@ -186,8 +184,13 @@ func message_intent_from_note(ndb: Ndb, sender_profile: ProfileBuf, content: Str

    // gather recipients
    if let recipient_note_id = note.direct_replies() {
        let replying_to = ndb.lookup_note(recipient_note_id)
        if let replying_to_pk = replying_to?.unsafeUnownedValue?.pubkey {
        let replying_to_pk = try? ndb.lookup_note(recipient_note_id, borrow: { replying_to_note -> Pubkey? in
            switch replying_to_note {
            case .none: return nil
            case .some(let note): return note.pubkey
            }
        })
        if let replying_to_pk {
            meta.isReplyToCurrentUser = replying_to_pk == our_pubkey

            if replying_to_pk != sender_pk {
@@ -247,8 +250,12 @@ func message_intent_from_note(ndb: Ndb, sender_profile: ProfileBuf, content: Str
}

func pubkey_to_inperson(ndb: Ndb, pubkey: Pubkey, our_pubkey: Pubkey) async -> INPerson {
    let profile_txn = ndb.lookup_profile(pubkey)
    let profile = profile_txn?.unsafeUnownedValue?.profile
    let profile = try? ndb.lookup_profile(pubkey, borrow: { profileRecord in
        switch profileRecord {
        case .some(let pr): return pr.profile
        case .none: return nil
        }
    })
    let name = profile?.name
    let display_name = profile?.display_name
    let nip05 = profile?.nip05
@@ -154,7 +154,7 @@ We have a few mailing lists that anyone can join to get involved in damus develo

### Contributing

See [docs/CONTRIBUTING.md](./docs/CONTRIBUTING.md)
Before starting to work on any contributions, please read [docs/CONTRIBUTING.md](./docs/CONTRIBUTING.md).

### Privacy
Your internet protocol (IP) address is exposed to the relays you connect to, and third party media hosters (e.g. nostr.build, imgur.com, giphy.com, youtube.com etc.) that render on Damus. If you want to improve your privacy, consider utilizing a service that masks your IP address (e.g. a VPN) from trackers online.
@@ -1,57 +0,0 @@
|
||||
//
|
||||
// block.h
|
||||
// damus
|
||||
//
|
||||
// Created by William Casarin on 2023-04-09.
|
||||
//
|
||||
|
||||
#ifndef block_h
|
||||
#define block_h
|
||||
|
||||
#include "nostr_bech32.h"
|
||||
#include "str_block.h"
|
||||
|
||||
#define MAX_BLOCKS 1024
|
||||
|
||||
enum block_type {
|
||||
BLOCK_HASHTAG = 1,
|
||||
BLOCK_TEXT = 2,
|
||||
BLOCK_MENTION_INDEX = 3,
|
||||
BLOCK_MENTION_BECH32 = 4,
|
||||
BLOCK_URL = 5,
|
||||
BLOCK_INVOICE = 6,
|
||||
};
|
||||
|
||||
|
||||
typedef struct invoice_block {
|
||||
struct str_block invstr;
|
||||
union {
|
||||
struct bolt11 *bolt11;
|
||||
};
|
||||
} invoice_block_t;
|
||||
|
||||
typedef struct mention_bech32_block {
|
||||
struct str_block str;
|
||||
struct nostr_bech32 bech32;
|
||||
} mention_bech32_block_t;
|
||||
|
||||
typedef struct note_block {
|
||||
enum block_type type;
|
||||
union {
|
||||
struct str_block str;
|
||||
struct invoice_block invoice;
|
||||
struct mention_bech32_block mention_bech32;
|
||||
int mention_index;
|
||||
} block;
|
||||
} block_t;
|
||||
|
||||
typedef struct note_blocks {
|
||||
int words;
|
||||
int num_blocks;
|
||||
struct note_block *blocks;
|
||||
} blocks_t;
|
||||
|
||||
void blocks_init(struct note_blocks *blocks);
|
||||
void blocks_free(struct note_blocks *blocks);
|
||||
|
||||
#endif /* block_h */
|
||||
@@ -2,7 +2,6 @@
//  Use this file to import your target's public headers that you would like to expose to Swift.
//

#include "damus.h"
#include "bolt11.h"
#include "amount.h"
#include "nostr_bech32.h"
damus-c/damus.c (deleted, 393 lines)
@@ -1,393 +0,0 @@
|
||||
//
|
||||
// damus.c
|
||||
// damus
|
||||
//
|
||||
// Created by William Casarin on 2022-10-17.
|
||||
//
|
||||
|
||||
#include "damus.h"
|
||||
#include "cursor.h"
|
||||
#include "bolt11.h"
|
||||
#include "bech32.h"
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
static int parse_digit(struct cursor *cur, int *digit) {
|
||||
int c;
|
||||
if ((c = peek_char(cur, 0)) == -1)
|
||||
return 0;
|
||||
|
||||
c -= '0';
|
||||
|
||||
if (c >= 0 && c <= 9) {
|
||||
*digit = c;
|
||||
cur->p++;
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int parse_mention_index(struct cursor *cur, struct note_block *block) {
|
||||
int d1, d2, d3, ind;
|
||||
u8 *start = cur->p;
|
||||
|
||||
if (!parse_str(cur, "#["))
|
||||
return 0;
|
||||
|
||||
if (!parse_digit(cur, &d1)) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
ind = d1;
|
||||
|
||||
if (parse_digit(cur, &d2))
|
||||
ind = (d1 * 10) + d2;
|
||||
|
||||
if (parse_digit(cur, &d3))
|
||||
ind = (d1 * 100) + (d2 * 10) + d3;
|
||||
|
||||
if (!parse_char(cur, ']')) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
block->type = BLOCK_MENTION_INDEX;
|
||||
block->block.mention_index = ind;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int parse_hashtag(struct cursor *cur, struct note_block *block) {
|
||||
int c;
|
||||
u8 *start = cur->p;
|
||||
|
||||
if (!parse_char(cur, '#'))
|
||||
return 0;
|
||||
|
||||
c = peek_char(cur, 0);
|
||||
if (c == -1 || is_whitespace(c) || c == '#') {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
consume_until_boundary(cur);
|
||||
|
||||
block->type = BLOCK_HASHTAG;
|
||||
block->block.str.start = (const char*)(start + 1);
|
||||
block->block.str.end = (const char*)cur->p;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int add_block(struct note_blocks *blocks, struct note_block block)
|
||||
{
|
||||
if (blocks->num_blocks + 1 >= MAX_BLOCKS)
|
||||
return 0;
|
||||
|
||||
blocks->blocks[blocks->num_blocks++] = block;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int add_text_block(struct note_blocks *blocks, const u8 *start, const u8 *end)
|
||||
{
|
||||
struct note_block b;
|
||||
|
||||
if (start == end)
|
||||
return 1;
|
||||
|
||||
b.type = BLOCK_TEXT;
|
||||
b.block.str.start = (const char*)start;
|
||||
b.block.str.end = (const char*)end;
|
||||
|
||||
return add_block(blocks, b);
|
||||
}
|
||||
|
||||
static int consume_url_fragment(struct cursor *cur)
|
||||
{
|
||||
int c;
|
||||
|
||||
if ((c = peek_char(cur, 0)) < 0)
|
||||
return 1;
|
||||
|
||||
if (c != '#' && c != '?') {
|
||||
return 1;
|
||||
}
|
||||
|
||||
cur->p++;
|
||||
|
||||
return consume_until_end_url(cur, 1);
|
||||
}
|
||||
|
||||
static int consume_url_path(struct cursor *cur)
|
||||
{
|
||||
int c;
|
||||
|
||||
if ((c = peek_char(cur, 0)) < 0)
|
||||
return 1;
|
||||
|
||||
if (c != '/') {
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (cur->p < cur->end) {
|
||||
c = *cur->p;
|
||||
|
||||
if (c == '?' || c == '#' || is_final_url_char(cur->p, cur->end)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
cur->p++;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int consume_url_host(struct cursor *cur)
|
||||
{
|
||||
char c;
|
||||
int count = 0;
|
||||
|
||||
while (cur->p < cur->end) {
|
||||
c = *cur->p;
|
||||
// TODO: handle IDNs
|
||||
if ((is_alphanumeric(c) || c == '.' || c == '-') && !is_final_url_char(cur->p, cur->end))
|
||||
{
|
||||
count++;
|
||||
cur->p++;
|
||||
continue;
|
||||
}
|
||||
|
||||
return count != 0;
|
||||
}
|
||||
|
||||
|
||||
// this means the end of the URL hostname is the end of the buffer and we finished
|
||||
return count != 0;
|
||||
}
|
||||
|
||||
static int parse_url(struct cursor *cur, struct note_block *block) {
|
||||
u8 *start = cur->p;
|
||||
u8 *host;
|
||||
int host_len;
|
||||
struct cursor path_cur;
|
||||
|
||||
if (!parse_str(cur, "http"))
|
||||
return 0;
|
||||
|
||||
if (parse_char(cur, 's') || parse_char(cur, 'S')) {
|
||||
if (!parse_str(cur, "://")) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
} else {
|
||||
if (!parse_str(cur, "://")) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// make sure to save the hostname. We will use this to detect damus.io links
|
||||
host = cur->p;
|
||||
|
||||
if (!consume_url_host(cur)) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// get the length of the host string
|
||||
host_len = (int)(cur->p - host);
|
||||
|
||||
// save the current parse state so that we can continue from here when
|
||||
// parsing the bech32 in the damus.io link if we have it
|
||||
copy_cursor(cur, &path_cur);
|
||||
|
||||
// skip leading /
|
||||
cursor_skip(&path_cur, 1);
|
||||
|
||||
if (!consume_url_path(cur)) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!consume_url_fragment(cur)) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// smart parens
|
||||
if (start - 1 >= 0 &&
|
||||
start < cur->end &&
|
||||
*(start - 1) == '(' &&
|
||||
(cur->p - 1) < cur->end &&
|
||||
*(cur->p - 1) == ')')
|
||||
{
|
||||
cur->p--;
|
||||
}
|
||||
|
||||
// save the bech32 string pos in case we hit a damus.io link
|
||||
block->block.str.start = (const char *)path_cur.p;
|
||||
|
||||
// if we have a damus link, make it a mention
|
||||
if (host_len == 8
|
||||
&& !strncmp((const char *)host, "damus.io", 8)
|
||||
&& parse_nostr_bech32(&path_cur, &block->block.mention_bech32.bech32))
|
||||
{
|
||||
block->block.str.end = (const char *)path_cur.p;
|
||||
block->type = BLOCK_MENTION_BECH32;
|
||||
return 1;
|
||||
}
|
||||
|
||||
block->type = BLOCK_URL;
|
||||
block->block.str.start = (const char *)start;
|
||||
block->block.str.end = (const char *)cur->p;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int parse_invoice(struct cursor *cur, struct note_block *block) {
|
||||
u8 *start, *end;
|
||||
char *fail;
|
||||
struct bolt11 *bolt11;
|
||||
// optional
|
||||
parse_str(cur, "lightning:");
|
||||
|
||||
start = cur->p;
|
||||
|
||||
if (!parse_str(cur, "lnbc"))
|
||||
return 0;
|
||||
|
||||
if (!consume_until_whitespace(cur, 1)) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
end = cur->p;
|
||||
|
||||
char str[end - start + 1];
|
||||
str[end - start] = 0;
|
||||
memcpy(str, start, end - start);
|
||||
|
||||
if (!(bolt11 = bolt11_decode(NULL, str, &fail))) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
block->type = BLOCK_INVOICE;
|
||||
|
||||
block->block.invoice.invstr.start = (const char*)start;
|
||||
block->block.invoice.invstr.end = (const char*)end;
|
||||
block->block.invoice.bolt11 = bolt11;
|
||||
|
||||
cur->p = end;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
static int parse_mention_bech32(struct cursor *cur, struct note_block *block) {
|
||||
u8 *start = cur->p;
|
||||
|
||||
parse_char(cur, '@');
|
||||
parse_str(cur, "nostr:");
|
||||
|
||||
block->block.str.start = (const char *)cur->p;
|
||||
|
||||
if (!parse_nostr_bech32(cur, &block->block.mention_bech32.bech32)) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
block->block.str.end = (const char *)cur->p;
|
||||
|
||||
block->type = BLOCK_MENTION_BECH32;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int add_text_then_block(struct cursor *cur, struct note_blocks *blocks, struct note_block block, u8 **start, const u8 *pre_mention)
|
||||
{
|
||||
if (!add_text_block(blocks, *start, pre_mention))
|
||||
return 0;
|
||||
|
||||
*start = (u8*)cur->p;
|
||||
|
||||
if (!add_block(blocks, block))
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int damus_parse_content(struct note_blocks *blocks, const char *content) {
|
||||
int cp, c;
|
||||
struct cursor cur;
|
||||
struct note_block block;
|
||||
u8 *start, *pre_mention;
|
||||
|
||||
blocks->words = 0;
|
||||
blocks->num_blocks = 0;
|
||||
make_cursor((u8*)content, (u8*)content + strlen(content), &cur);
|
||||
|
||||
start = cur.p;
|
||||
while (cur.p < cur.end && blocks->num_blocks < MAX_BLOCKS) {
|
||||
cp = peek_char(&cur, -1);
|
||||
c = peek_char(&cur, 0);
|
||||
|
||||
// new word
|
||||
if (is_whitespace(cp) && !is_whitespace(c)) {
|
||||
blocks->words++;
|
||||
}
|
||||
|
||||
pre_mention = cur.p;
|
||||
if (cp == -1 || is_left_boundary(cp) || c == '#') {
|
||||
if (c == '#' && (parse_mention_index(&cur, &block) || parse_hashtag(&cur, &block))) {
|
||||
if (!add_text_then_block(&cur, blocks, block, &start, pre_mention))
|
||||
return 0;
|
||||
continue;
|
||||
} else if ((c == 'h' || c == 'H') && parse_url(&cur, &block)) {
|
||||
if (!add_text_then_block(&cur, blocks, block, &start, pre_mention))
|
||||
return 0;
|
||||
continue;
|
||||
} else if ((c == 'l' || c == 'L') && parse_invoice(&cur, &block)) {
|
||||
if (!add_text_then_block(&cur, blocks, block, &start, pre_mention))
|
||||
return 0;
|
||||
continue;
|
||||
} else if ((c == 'n' || c == '@') && parse_mention_bech32(&cur, &block)) {
|
||||
if (!add_text_then_block(&cur, blocks, block, &start, pre_mention))
|
||||
return 0;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
cur.p++;
|
||||
}
|
||||
|
||||
if (cur.p - start > 0) {
|
||||
if (!add_text_block(blocks, start, cur.p))
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
void blocks_init(struct note_blocks *blocks) {
|
||||
blocks->blocks = malloc(sizeof(struct note_block) * MAX_BLOCKS);
|
||||
blocks->num_blocks = 0;
|
||||
}
|
||||
|
||||
void blocks_free(struct note_blocks *blocks) {
|
||||
if (!blocks->blocks) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (int i = 0; i < blocks->num_blocks; ++i) {
|
||||
if (blocks->blocks[i].type == BLOCK_MENTION_BECH32) {
|
||||
free(blocks->blocks[i].block.mention_bech32.bech32.buffer);
|
||||
blocks->blocks[i].block.mention_bech32.bech32.buffer = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
free(blocks->blocks);
|
||||
blocks->num_blocks = 0;
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
//
|
||||
// damus.h
|
||||
// damus
|
||||
//
|
||||
// Created by William Casarin on 2022-10-17.
|
||||
//
|
||||
|
||||
#ifndef damus_h
|
||||
#define damus_h
|
||||
|
||||
#include <stdio.h>
|
||||
#include "block.h"
|
||||
|
||||
typedef unsigned char u8;
|
||||
|
||||
int damus_parse_content(struct note_blocks *blocks, const char *content);
|
||||
|
||||
#endif /* damus_h */
|
||||
@@ -1,84 +0,0 @@
|
||||
/* CC0 (Public domain) - see LICENSE file for details */
|
||||
#ifndef CCAN_HEX_H
|
||||
#define CCAN_HEX_H
|
||||
#include "config.h"
|
||||
#include <stdbool.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
/**
|
||||
* hex_decode - Unpack a hex string.
|
||||
* @str: the hexadecimal string
|
||||
* @slen: the length of @str
|
||||
* @buf: the buffer to write the data into
|
||||
* @bufsize: the length of
|
||||
*
|
||||
* Returns false if there are any characters which aren't 0-9, a-f or A-F,
|
||||
* of the string wasn't the right length for @bufsize.
|
||||
*
|
||||
* Example:
|
||||
* unsigned char data[20];
|
||||
*
|
||||
* if (!hex_decode(argv[1], strlen(argv[1]), data, 20))
|
||||
* printf("String is malformed!\n");
|
||||
*/
|
||||
bool hex_decode(const char *str, size_t slen, void *buf, size_t bufsize);
|
||||
|
||||
/**
|
||||
* hex_encode - Create a nul-terminated hex string
|
||||
* @buf: the buffer to read the data from
|
||||
* @bufsize: the length of buf
|
||||
* @dest: the string to fill
|
||||
* @destsize: the max size of the string
|
||||
*
|
||||
* Returns true if the string, including terminator, fit in @destsize;
|
||||
*
|
||||
* Example:
|
||||
* unsigned char buf[] = { 0x1F, 0x2F };
|
||||
* char str[5];
|
||||
*
|
||||
* if (!hex_encode(buf, sizeof(buf), str, sizeof(str)))
|
||||
* abort();
|
||||
*/
|
||||
bool hex_encode(const void *buf, size_t bufsize, char *dest, size_t destsize);
|
||||
|
||||
/**
|
||||
* hex_str_size - Calculate how big a nul-terminated hex string is
|
||||
* @bytes: bytes of data to represent
|
||||
*
|
||||
* Example:
|
||||
* unsigned char buf[] = { 0x1F, 0x2F };
|
||||
* char str[hex_str_size(sizeof(buf))];
|
||||
*
|
||||
* hex_encode(buf, sizeof(buf), str, sizeof(str));
|
||||
*/
|
||||
static inline size_t hex_str_size(size_t bytes)
|
||||
{
|
||||
return 2 * bytes + 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* hex_data_size - Calculate how many bytes of data in a hex string
|
||||
* @strlen: the length of the string (with or without NUL)
|
||||
*
|
||||
* Example:
|
||||
* const char str[] = "1F2F";
|
||||
* unsigned char buf[hex_data_size(sizeof(str))];
|
||||
*
|
||||
* hex_decode(str, strlen(str), buf, sizeof(buf));
|
||||
*/
|
||||
static inline size_t hex_data_size(size_t strlen)
|
||||
{
|
||||
return strlen / 2;
|
||||
}
|
||||
|
||||
static inline char hexchar(unsigned int val)
|
||||
{
|
||||
if (val < 10)
|
||||
return '0' + val;
|
||||
if (val < 16)
|
||||
return 'a' + val - 10;
|
||||
abort();
|
||||
}
|
||||
|
||||
|
||||
#endif /* CCAN_HEX_H */
|
||||
@@ -1,325 +0,0 @@
|
||||
//
|
||||
// nostr_bech32.c
|
||||
// damus
|
||||
//
|
||||
// Created by William Casarin on 2023-04-09.
|
||||
//
|
||||
|
||||
#include "nostr_bech32.h"
|
||||
#include <stdlib.h>
|
||||
#include "endian.h"
|
||||
#include "cursor.h"
|
||||
#include "bech32.h"
|
||||
#include <stdbool.h>
|
||||
|
||||
#define MAX_TLVS 16
|
||||
|
||||
#define TLV_SPECIAL 0
|
||||
#define TLV_RELAY 1
|
||||
#define TLV_AUTHOR 2
|
||||
#define TLV_KIND 3
|
||||
#define TLV_KNOWN_TLVS 4
|
||||
|
||||
struct nostr_tlv {
|
||||
u8 type;
|
||||
u8 len;
|
||||
const u8 *value;
|
||||
};
|
||||
|
||||
struct nostr_tlvs {
|
||||
struct nostr_tlv tlvs[MAX_TLVS];
|
||||
int num_tlvs;
|
||||
};
|
||||
|
||||
static int parse_nostr_tlv(struct cursor *cur, struct nostr_tlv *tlv) {
|
||||
// get the tlv tag
|
||||
if (!pull_byte(cur, &tlv->type))
|
||||
return 0;
|
||||
|
||||
// unknown, fail!
|
||||
if (tlv->type >= TLV_KNOWN_TLVS)
|
||||
return 0;
|
||||
|
||||
// get the length
|
||||
if (!pull_byte(cur, &tlv->len))
|
||||
return 0;
|
||||
|
||||
// is the reported length greater then our buffer? if so fail
|
||||
if (cur->p + tlv->len > cur->end)
|
||||
return 0;
|
||||
|
||||
tlv->value = cur->p;
|
||||
cur->p += tlv->len;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int parse_nostr_tlvs(struct cursor *cur, struct nostr_tlvs *tlvs) {
|
||||
int i;
|
||||
tlvs->num_tlvs = 0;
|
||||
|
||||
for (i = 0; i < MAX_TLVS; i++) {
|
||||
if (parse_nostr_tlv(cur, &tlvs->tlvs[i])) {
|
||||
tlvs->num_tlvs++;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (tlvs->num_tlvs == 0)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int find_tlv(struct nostr_tlvs *tlvs, u8 type, struct nostr_tlv **tlv) {
|
||||
*tlv = NULL;
|
||||
|
||||
for (int i = 0; i < tlvs->num_tlvs; i++) {
|
||||
if (tlvs->tlvs[i].type == type) {
|
||||
*tlv = &tlvs->tlvs[i];
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_nostr_bech32_type(const char *prefix, enum nostr_bech32_type *type) {
|
||||
// Parse type
|
||||
if (strcmp(prefix, "note") == 0) {
|
||||
*type = NOSTR_BECH32_NOTE;
|
||||
return 1;
|
||||
} else if (strcmp(prefix, "npub") == 0) {
|
||||
*type = NOSTR_BECH32_NPUB;
|
||||
return 1;
|
||||
} else if (strcmp(prefix, "nsec") == 0) {
|
||||
*type = NOSTR_BECH32_NSEC;
|
||||
return 1;
|
||||
} else if (strcmp(prefix, "nprofile") == 0) {
|
||||
*type = NOSTR_BECH32_NPROFILE;
|
||||
return 1;
|
||||
} else if (strcmp(prefix, "nevent") == 0) {
|
||||
*type = NOSTR_BECH32_NEVENT;
|
||||
return 1;
|
||||
} else if (strcmp(prefix, "nrelay") == 0) {
|
||||
*type = NOSTR_BECH32_NRELAY;
|
||||
return 1;
|
||||
} else if (strcmp(prefix, "naddr") == 0) {
|
||||
*type = NOSTR_BECH32_NADDR;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_nostr_bech32_note(struct cursor *cur, struct bech32_note *note) {
|
||||
return pull_bytes(cur, 32, ¬e->event_id);
|
||||
}
|
||||
|
||||
static int parse_nostr_bech32_npub(struct cursor *cur, struct bech32_npub *npub) {
|
||||
return pull_bytes(cur, 32, &npub->pubkey);
|
||||
}
|
||||
|
||||
static int parse_nostr_bech32_nsec(struct cursor *cur, struct bech32_nsec *nsec) {
|
||||
return pull_bytes(cur, 32, &nsec->nsec);
|
||||
}
|
||||
|
||||
static int tlvs_to_relays(struct nostr_tlvs *tlvs, struct relays *relays) {
|
||||
struct nostr_tlv *tlv;
|
||||
struct str_block *str;
|
||||
|
||||
relays->num_relays = 0;
|
||||
|
||||
for (int i = 0; i < tlvs->num_tlvs; i++) {
|
||||
tlv = &tlvs->tlvs[i];
|
||||
if (tlv->type != TLV_RELAY)
|
||||
continue;
|
||||
|
||||
if (relays->num_relays + 1 > MAX_RELAYS)
|
||||
break;
|
||||
|
||||
str = &relays->relays[relays->num_relays++];
|
||||
str->start = (const char*)tlv->value;
|
||||
str->end = (const char*)(tlv->value + tlv->len);
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static uint32_t decode_tlv_u32(const uint8_t *bytes) {
|
||||
beint32_t *be32_bytes = (beint32_t*)bytes;
|
||||
return be32_to_cpu(*be32_bytes);
|
||||
}
|
||||
|
||||
static int parse_nostr_bech32_nevent(struct cursor *cur, struct bech32_nevent *nevent) {
|
||||
struct nostr_tlvs tlvs;
|
||||
struct nostr_tlv *tlv;
|
||||
|
||||
if (!parse_nostr_tlvs(cur, &tlvs))
|
||||
return 0;
|
||||
|
||||
if (!find_tlv(&tlvs, TLV_SPECIAL, &tlv))
|
||||
return 0;
|
||||
|
||||
if (tlv->len != 32)
|
||||
return 0;
|
||||
|
||||
nevent->event_id = tlv->value;
|
||||
|
||||
if (find_tlv(&tlvs, TLV_AUTHOR, &tlv)) {
|
||||
nevent->pubkey = tlv->value;
|
||||
} else {
|
||||
nevent->pubkey = NULL;
|
||||
}
|
||||
|
||||
if(find_tlv(&tlvs, TLV_KIND, &tlv)) {
|
||||
nevent->kind = decode_tlv_u32(tlv->value);
|
||||
nevent->has_kind = true;
|
||||
} else {
|
||||
nevent->has_kind = false;
|
||||
}
|
||||
|
||||
return tlvs_to_relays(&tlvs, &nevent->relays);
|
||||
}
|
||||
|
||||
static int parse_nostr_bech32_naddr(struct cursor *cur, struct bech32_naddr *naddr) {
|
||||
struct nostr_tlvs tlvs;
|
||||
struct nostr_tlv *tlv;
|
||||
|
||||
if (!parse_nostr_tlvs(cur, &tlvs))
|
||||
return 0;
|
||||
|
||||
if (!find_tlv(&tlvs, TLV_SPECIAL, &tlv))
|
||||
return 0;
|
||||
|
||||
naddr->identifier.start = (const char*)tlv->value;
|
||||
naddr->identifier.end = (const char*)tlv->value + tlv->len;
|
||||
|
||||
if (!find_tlv(&tlvs, TLV_AUTHOR, &tlv))
|
||||
return 0;
|
||||
|
||||
naddr->pubkey = tlv->value;
|
||||
|
||||
if(!find_tlv(&tlvs, TLV_KIND, &tlv)) {
|
||||
return 0;
|
||||
}
|
||||
naddr->kind = decode_tlv_u32(tlv->value);
|
||||
|
||||
return tlvs_to_relays(&tlvs, &naddr->relays);
|
||||
}
|
||||
|
||||
static int parse_nostr_bech32_nprofile(struct cursor *cur, struct bech32_nprofile *nprofile) {
|
||||
struct nostr_tlvs tlvs;
|
||||
struct nostr_tlv *tlv;
|
||||
|
||||
if (!parse_nostr_tlvs(cur, &tlvs))
|
||||
return 0;
|
||||
|
||||
if (!find_tlv(&tlvs, TLV_SPECIAL, &tlv))
|
||||
return 0;
|
||||
|
||||
if (tlv->len != 32)
|
||||
return 0;
|
||||
|
||||
nprofile->pubkey = tlv->value;
|
||||
|
||||
return tlvs_to_relays(&tlvs, &nprofile->relays);
|
||||
}
|
||||
|
||||
static int parse_nostr_bech32_nrelay(struct cursor *cur, struct bech32_nrelay *nrelay) {
|
||||
struct nostr_tlvs tlvs;
|
||||
struct nostr_tlv *tlv;
|
||||
|
||||
if (!parse_nostr_tlvs(cur, &tlvs))
|
||||
return 0;
|
||||
|
||||
if (!find_tlv(&tlvs, TLV_SPECIAL, &tlv))
|
||||
return 0;
|
||||
|
||||
nrelay->relay.start = (const char*)tlv->value;
|
||||
nrelay->relay.end = (const char*)tlv->value + tlv->len;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int parse_nostr_bech32(struct cursor *cur, struct nostr_bech32 *obj) {
|
||||
u8 *start, *end;
|
||||
|
||||
start = cur->p;
|
||||
|
||||
if (!consume_until_non_alphanumeric(cur, 1)) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
end = cur->p;
|
||||
|
||||
size_t data_len;
|
||||
size_t input_len = end - start;
|
||||
if (input_len < 10 || input_len > 10000) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
obj->buffer = malloc(input_len * 2);
|
||||
if (!obj->buffer)
|
||||
return 0;
|
||||
|
||||
u8 data[input_len];
|
||||
char prefix[input_len];
|
||||
|
||||
if (bech32_decode_len(prefix, data, &data_len, (const char*)start, input_len) == BECH32_ENCODING_NONE) {
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
obj->buflen = 0;
|
||||
if (!bech32_convert_bits(obj->buffer, &obj->buflen, 8, data, data_len, 5, 0)) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (!parse_nostr_bech32_type(prefix, &obj->type)) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
struct cursor bcur;
|
||||
make_cursor(obj->buffer, obj->buffer + obj->buflen, &bcur);
|
||||
|
||||
switch (obj->type) {
|
||||
case NOSTR_BECH32_NOTE:
|
||||
if (!parse_nostr_bech32_note(&bcur, &obj->data.note))
|
||||
goto fail;
|
||||
break;
|
||||
case NOSTR_BECH32_NPUB:
|
||||
if (!parse_nostr_bech32_npub(&bcur, &obj->data.npub))
|
||||
goto fail;
|
||||
break;
|
||||
case NOSTR_BECH32_NSEC:
|
||||
if (!parse_nostr_bech32_nsec(&bcur, &obj->data.nsec))
|
||||
goto fail;
|
||||
break;
|
||||
case NOSTR_BECH32_NEVENT:
|
||||
if (!parse_nostr_bech32_nevent(&bcur, &obj->data.nevent))
|
||||
goto fail;
|
||||
break;
|
||||
case NOSTR_BECH32_NADDR:
|
||||
if (!parse_nostr_bech32_naddr(&bcur, &obj->data.naddr))
|
||||
goto fail;
|
||||
break;
|
||||
case NOSTR_BECH32_NPROFILE:
|
||||
if (!parse_nostr_bech32_nprofile(&bcur, &obj->data.nprofile))
|
||||
goto fail;
|
||||
break;
|
||||
case NOSTR_BECH32_NRELAY:
|
||||
if (!parse_nostr_bech32_nrelay(&bcur, &obj->data.nrelay))
|
||||
goto fail;
|
||||
break;
|
||||
}
|
||||
|
||||
return 1;
|
||||
|
||||
fail:
|
||||
free(obj->buffer);
|
||||
cur->p = start;
|
||||
return 0;
|
||||
}
|
||||
@@ -1,89 +0,0 @@
|
||||
//
|
||||
// nostr_bech32.h
|
||||
// damus
|
||||
//
|
||||
// Created by William Casarin on 2023-04-09.
|
||||
//
|
||||
|
||||
#ifndef nostr_bech32_h
|
||||
#define nostr_bech32_h
|
||||
|
||||
#include <stdio.h>
|
||||
#include "str_block.h"
|
||||
#include "cursor.h"
|
||||
#include <stdbool.h>
|
||||
|
||||
typedef unsigned char u8;
|
||||
#define MAX_RELAYS 10
|
||||
|
||||
struct relays {
|
||||
struct str_block relays[MAX_RELAYS];
|
||||
int num_relays;
|
||||
};
|
||||
|
||||
enum nostr_bech32_type {
|
||||
NOSTR_BECH32_NOTE = 1,
|
||||
NOSTR_BECH32_NPUB = 2,
|
||||
NOSTR_BECH32_NPROFILE = 3,
|
||||
NOSTR_BECH32_NEVENT = 4,
|
||||
NOSTR_BECH32_NRELAY = 5,
|
||||
NOSTR_BECH32_NADDR = 6,
|
||||
NOSTR_BECH32_NSEC = 7,
|
||||
};
|
||||
|
||||
struct bech32_note {
|
||||
const u8 *event_id;
|
||||
};
|
||||
|
||||
struct bech32_npub {
|
||||
const u8 *pubkey;
|
||||
};
|
||||
|
||||
struct bech32_nsec {
|
||||
const u8 *nsec;
|
||||
};
|
||||
|
||||
struct bech32_nevent {
|
||||
struct relays relays;
|
||||
const u8 *event_id;
|
||||
const u8 *pubkey; // optional
|
||||
uint32_t kind;
|
||||
bool has_kind;
|
||||
};
|
||||
|
||||
struct bech32_nprofile {
|
||||
struct relays relays;
|
||||
const u8 *pubkey;
|
||||
};
|
||||
|
||||
struct bech32_naddr {
|
||||
struct relays relays;
|
||||
struct str_block identifier;
|
||||
const u8 *pubkey;
|
||||
uint32_t kind;
|
||||
};
|
||||
|
||||
struct bech32_nrelay {
|
||||
struct str_block relay;
|
||||
};
|
||||
|
||||
typedef struct nostr_bech32 {
|
||||
enum nostr_bech32_type type;
|
||||
u8 *buffer; // holds strings and tlv stuff
|
||||
size_t buflen;
|
||||
|
||||
union {
|
||||
struct bech32_note note;
|
||||
struct bech32_npub npub;
|
||||
struct bech32_nsec nsec;
|
||||
struct bech32_nevent nevent;
|
||||
struct bech32_nprofile nprofile;
|
||||
struct bech32_naddr naddr;
|
||||
struct bech32_nrelay nrelay;
|
||||
} data;
|
||||
} nostr_bech32_t;
|
||||
|
||||
|
||||
int parse_nostr_bech32(struct cursor *cur, struct nostr_bech32 *obj);
|
||||
|
||||
#endif /* nostr_bech32_h */
|
||||
damus-c/sha256.c (deleted, 308 lines)
@@ -1,308 +0,0 @@
/* MIT (BSD) license - see LICENSE file for details */
/* SHA256 core code translated from the Bitcoin project's C++:
 *
 * src/crypto/sha256.cpp commit 417532c8acb93c36c2b6fd052b7c11b6a2906aa2
 * Copyright (c) 2014 The Bitcoin Core developers
 * Distributed under the MIT software license, see the accompanying
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.
 */
#include "sha256.h"
#include "compiler.h"
#include "endian.h"
#include <stdbool.h>
#include <assert.h>
#include <string.h>

static void invalidate_sha256(struct sha256_ctx *ctx)
{
#ifdef CCAN_CRYPTO_SHA256_USE_OPENSSL
    ctx->c.md_len = 0;
#else
    ctx->bytes = (size_t)-1;
#endif
}

static void check_sha256(struct sha256_ctx *ctx UNUSED)
{
#ifdef CCAN_CRYPTO_SHA256_USE_OPENSSL
    assert(ctx->c.md_len != 0);
#else
    assert(ctx->bytes != (size_t)-1);
#endif
}

#ifdef CCAN_CRYPTO_SHA256_USE_OPENSSL
void sha256_init(struct sha256_ctx *ctx)
{
    SHA256_Init(&ctx->c);
}

void sha256_update(struct sha256_ctx *ctx, const void *p, size_t size)
{
    check_sha256(ctx);
    SHA256_Update(&ctx->c, p, size);
}

void sha256_done(struct sha256_ctx *ctx, struct sha256 *res)
{
    SHA256_Final(res->u.u8, &ctx->c);
    invalidate_sha256(ctx);
}
#else
static uint32_t Ch(uint32_t x, uint32_t y, uint32_t z)
{
    return z ^ (x & (y ^ z));
}
static uint32_t Maj(uint32_t x, uint32_t y, uint32_t z)
{
    return (x & y) | (z & (x | y));
}
static uint32_t Sigma0(uint32_t x)
{
    return (x >> 2 | x << 30) ^ (x >> 13 | x << 19) ^ (x >> 22 | x << 10);
}
static uint32_t Sigma1(uint32_t x)
{
    return (x >> 6 | x << 26) ^ (x >> 11 | x << 21) ^ (x >> 25 | x << 7);
}
static uint32_t sigma0(uint32_t x)
{
    return (x >> 7 | x << 25) ^ (x >> 18 | x << 14) ^ (x >> 3);
}
static uint32_t sigma1(uint32_t x)
{
    return (x >> 17 | x << 15) ^ (x >> 19 | x << 13) ^ (x >> 10);
}

/** One round of SHA-256. */
static void Round(uint32_t a, uint32_t b, uint32_t c, uint32_t *d, uint32_t e, uint32_t f, uint32_t g, uint32_t *h, uint32_t k, uint32_t w)
{
    uint32_t t1 = *h + Sigma1(e) + Ch(e, f, g) + k + w;
    uint32_t t2 = Sigma0(a) + Maj(a, b, c);
    *d += t1;
    *h = t1 + t2;
}

/** Perform one SHA-256 transformation, processing a 64-byte chunk. */
static void Transform(uint32_t *s, const uint32_t *chunk)
{
    uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7];
    uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;

    Round(a, b, c, &d, e, f, g, &h, 0x428a2f98, w0 = be32_to_cpu(chunk[0]));
    Round(h, a, b, &c, d, e, f, &g, 0x71374491, w1 = be32_to_cpu(chunk[1]));
    Round(g, h, a, &b, c, d, e, &f, 0xb5c0fbcf, w2 = be32_to_cpu(chunk[2]));
    Round(f, g, h, &a, b, c, d, &e, 0xe9b5dba5, w3 = be32_to_cpu(chunk[3]));
    Round(e, f, g, &h, a, b, c, &d, 0x3956c25b, w4 = be32_to_cpu(chunk[4]));
    Round(d, e, f, &g, h, a, b, &c, 0x59f111f1, w5 = be32_to_cpu(chunk[5]));
    Round(c, d, e, &f, g, h, a, &b, 0x923f82a4, w6 = be32_to_cpu(chunk[6]));
    Round(b, c, d, &e, f, g, h, &a, 0xab1c5ed5, w7 = be32_to_cpu(chunk[7]));
    Round(a, b, c, &d, e, f, g, &h, 0xd807aa98, w8 = be32_to_cpu(chunk[8]));
    Round(h, a, b, &c, d, e, f, &g, 0x12835b01, w9 = be32_to_cpu(chunk[9]));
    Round(g, h, a, &b, c, d, e, &f, 0x243185be, w10 = be32_to_cpu(chunk[10]));
    Round(f, g, h, &a, b, c, d, &e, 0x550c7dc3, w11 = be32_to_cpu(chunk[11]));
    Round(e, f, g, &h, a, b, c, &d, 0x72be5d74, w12 = be32_to_cpu(chunk[12]));
    Round(d, e, f, &g, h, a, b, &c, 0x80deb1fe, w13 = be32_to_cpu(chunk[13]));
    Round(c, d, e, &f, g, h, a, &b, 0x9bdc06a7, w14 = be32_to_cpu(chunk[14]));
    Round(b, c, d, &e, f, g, h, &a, 0xc19bf174, w15 = be32_to_cpu(chunk[15]));

    Round(a, b, c, &d, e, f, g, &h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1));
    Round(h, a, b, &c, d, e, f, &g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2));
    Round(g, h, a, &b, c, d, e, &f, 0x0fc19dc6, w2 += sigma1(w0) + w11 + sigma0(w3));
    Round(f, g, h, &a, b, c, d, &e, 0x240ca1cc, w3 += sigma1(w1) + w12 + sigma0(w4));
    Round(e, f, g, &h, a, b, c, &d, 0x2de92c6f, w4 += sigma1(w2) + w13 + sigma0(w5));
    Round(d, e, f, &g, h, a, b, &c, 0x4a7484aa, w5 += sigma1(w3) + w14 + sigma0(w6));
    Round(c, d, e, &f, g, h, a, &b, 0x5cb0a9dc, w6 += sigma1(w4) + w15 + sigma0(w7));
    Round(b, c, d, &e, f, g, h, &a, 0x76f988da, w7 += sigma1(w5) + w0 + sigma0(w8));
    Round(a, b, c, &d, e, f, g, &h, 0x983e5152, w8 += sigma1(w6) + w1 + sigma0(w9));
    Round(h, a, b, &c, d, e, f, &g, 0xa831c66d, w9 += sigma1(w7) + w2 + sigma0(w10));
    Round(g, h, a, &b, c, d, e, &f, 0xb00327c8, w10 += sigma1(w8) + w3 + sigma0(w11));
    Round(f, g, h, &a, b, c, d, &e, 0xbf597fc7, w11 += sigma1(w9) + w4 + sigma0(w12));
    Round(e, f, g, &h, a, b, c, &d, 0xc6e00bf3, w12 += sigma1(w10) + w5 + sigma0(w13));
    Round(d, e, f, &g, h, a, b, &c, 0xd5a79147, w13 += sigma1(w11) + w6 + sigma0(w14));
    Round(c, d, e, &f, g, h, a, &b, 0x06ca6351, w14 += sigma1(w12) + w7 + sigma0(w15));
    Round(b, c, d, &e, f, g, h, &a, 0x14292967, w15 += sigma1(w13) + w8 + sigma0(w0));

    Round(a, b, c, &d, e, f, g, &h, 0x27b70a85, w0 += sigma1(w14) + w9 + sigma0(w1));
    Round(h, a, b, &c, d, e, f, &g, 0x2e1b2138, w1 += sigma1(w15) + w10 + sigma0(w2));
    Round(g, h, a, &b, c, d, e, &f, 0x4d2c6dfc, w2 += sigma1(w0) + w11 + sigma0(w3));
    Round(f, g, h, &a, b, c, d, &e, 0x53380d13, w3 += sigma1(w1) + w12 + sigma0(w4));
    Round(e, f, g, &h, a, b, c, &d, 0x650a7354, w4 += sigma1(w2) + w13 + sigma0(w5));
    Round(d, e, f, &g, h, a, b, &c, 0x766a0abb, w5 += sigma1(w3) + w14 + sigma0(w6));
    Round(c, d, e, &f, g, h, a, &b, 0x81c2c92e, w6 += sigma1(w4) + w15 + sigma0(w7));
    Round(b, c, d, &e, f, g, h, &a, 0x92722c85, w7 += sigma1(w5) + w0 + sigma0(w8));
    Round(a, b, c, &d, e, f, g, &h, 0xa2bfe8a1, w8 += sigma1(w6) + w1 + sigma0(w9));
    Round(h, a, b, &c, d, e, f, &g, 0xa81a664b, w9 += sigma1(w7) + w2 + sigma0(w10));
    Round(g, h, a, &b, c, d, e, &f, 0xc24b8b70, w10 += sigma1(w8) + w3 + sigma0(w11));
    Round(f, g, h, &a, b, c, d, &e, 0xc76c51a3, w11 += sigma1(w9) + w4 + sigma0(w12));
    Round(e, f, g, &h, a, b, c, &d, 0xd192e819, w12 += sigma1(w10) + w5 + sigma0(w13));
    Round(d, e, f, &g, h, a, b, &c, 0xd6990624, w13 += sigma1(w11) + w6 + sigma0(w14));
    Round(c, d, e, &f, g, h, a, &b, 0xf40e3585, w14 += sigma1(w12) + w7 + sigma0(w15));
    Round(b, c, d, &e, f, g, h, &a, 0x106aa070, w15 += sigma1(w13) + w8 + sigma0(w0));

    Round(a, b, c, &d, e, f, g, &h, 0x19a4c116, w0 += sigma1(w14) + w9 + sigma0(w1));
    Round(h, a, b, &c, d, e, f, &g, 0x1e376c08, w1 += sigma1(w15) + w10 + sigma0(w2));
    Round(g, h, a, &b, c, d, e, &f, 0x2748774c, w2 += sigma1(w0) + w11 + sigma0(w3));
    Round(f, g, h, &a, b, c, d, &e, 0x34b0bcb5, w3 += sigma1(w1) + w12 + sigma0(w4));
    Round(e, f, g, &h, a, b, c, &d, 0x391c0cb3, w4 += sigma1(w2) + w13 + sigma0(w5));
    Round(d, e, f, &g, h, a, b, &c, 0x4ed8aa4a, w5 += sigma1(w3) + w14 + sigma0(w6));
    Round(c, d, e, &f, g, h, a, &b, 0x5b9cca4f, w6 += sigma1(w4) + w15 + sigma0(w7));
    Round(b, c, d, &e, f, g, h, &a, 0x682e6ff3, w7 += sigma1(w5) + w0 + sigma0(w8));
    Round(a, b, c, &d, e, f, g, &h, 0x748f82ee, w8 += sigma1(w6) + w1 + sigma0(w9));
    Round(h, a, b, &c, d, e, f, &g, 0x78a5636f, w9 += sigma1(w7) + w2 + sigma0(w10));
    Round(g, h, a, &b, c, d, e, &f, 0x84c87814, w10 += sigma1(w8) + w3 + sigma0(w11));
    Round(f, g, h, &a, b, c, d, &e, 0x8cc70208, w11 += sigma1(w9) + w4 + sigma0(w12));
    Round(e, f, g, &h, a, b, c, &d, 0x90befffa, w12 += sigma1(w10) + w5 + sigma0(w13));
    Round(d, e, f, &g, h, a, b, &c, 0xa4506ceb, w13 += sigma1(w11) + w6 + sigma0(w14));
    Round(c, d, e, &f, g, h, a, &b, 0xbef9a3f7, w14 + sigma1(w12) + w7 + sigma0(w15));
    Round(b, c, d, &e, f, g, h, &a, 0xc67178f2, w15 + sigma1(w13) + w8 + sigma0(w0));

    s[0] += a;
    s[1] += b;
    s[2] += c;
    s[3] += d;
    s[4] += e;
    s[5] += f;
    s[6] += g;
    s[7] += h;
}

static bool alignment_ok(const void *p UNUSED, size_t n UNUSED)
{
#if HAVE_UNALIGNED_ACCESS
    return true;
#else
    return ((size_t)p % n == 0);
#endif
}

static void add(struct sha256_ctx *ctx, const void *p, size_t len)
{
    const unsigned char *data = p;
    size_t bufsize = ctx->bytes % 64;

    if (bufsize + len >= 64) {
        /* Fill the buffer, and process it. */
        memcpy(ctx->buf.u8 + bufsize, data, 64 - bufsize);
        ctx->bytes += 64 - bufsize;
        data += 64 - bufsize;
        len -= 64 - bufsize;
        Transform(ctx->s, ctx->buf.u32);
        bufsize = 0;
    }

    while (len >= 64) {
        /* Process full chunks directly from the source. */
        if (alignment_ok(data, sizeof(uint32_t)))
            Transform(ctx->s, (const uint32_t *)data);
        else {
            memcpy(ctx->buf.u8, data, sizeof(ctx->buf));
            Transform(ctx->s, ctx->buf.u32);
        }
        ctx->bytes += 64;
        data += 64;
        len -= 64;
    }

    if (len) {
        /* Fill the buffer with what remains. */
        memcpy(ctx->buf.u8 + bufsize, data, len);
        ctx->bytes += len;
    }
}

void sha256_init(struct sha256_ctx *ctx)
{
    struct sha256_ctx init = SHA256_INIT;
    *ctx = init;
}

void sha256_update(struct sha256_ctx *ctx, const void *p, size_t size)
{
    check_sha256(ctx);
    add(ctx, p, size);
}

void sha256_done(struct sha256_ctx *ctx, struct sha256 *res)
{
    static const unsigned char pad[64] = {0x80};
    uint64_t sizedesc;
    size_t i;

    sizedesc = cpu_to_be64((uint64_t)ctx->bytes << 3);
    /* Add '1' bit to terminate, then all 0 bits, up to next block - 8. */
    add(ctx, pad, 1 + ((128 - 8 - (ctx->bytes % 64) - 1) % 64));
    /* Add number of bits of data (big endian) */
    add(ctx, &sizedesc, 8);
    for (i = 0; i < sizeof(ctx->s) / sizeof(ctx->s[0]); i++)
        res->u.u32[i] = cpu_to_be32(ctx->s[i]);
    invalidate_sha256(ctx);
}
#endif

void sha256(struct sha256 *sha, const void *p, size_t size)
{
    struct sha256_ctx ctx;

    sha256_init(&ctx);
    sha256_update(&ctx, p, size);
    sha256_done(&ctx, sha);
}

void sha256_u8(struct sha256_ctx *ctx, uint8_t v)
{
    sha256_update(ctx, &v, sizeof(v));
}

void sha256_u16(struct sha256_ctx *ctx, uint16_t v)
{
    sha256_update(ctx, &v, sizeof(v));
}

void sha256_u32(struct sha256_ctx *ctx, uint32_t v)
{
    sha256_update(ctx, &v, sizeof(v));
}

void sha256_u64(struct sha256_ctx *ctx, uint64_t v)
{
    sha256_update(ctx, &v, sizeof(v));
}

/* Add as little-endian */
void sha256_le16(struct sha256_ctx *ctx, uint16_t v)
{
    leint16_t lev = cpu_to_le16(v);
    sha256_update(ctx, &lev, sizeof(lev));
}

void sha256_le32(struct sha256_ctx *ctx, uint32_t v)
{
    leint32_t lev = cpu_to_le32(v);
    sha256_update(ctx, &lev, sizeof(lev));
}

void sha256_le64(struct sha256_ctx *ctx, uint64_t v)
{
    leint64_t lev = cpu_to_le64(v);
    sha256_update(ctx, &lev, sizeof(lev));
}

/* Add as big-endian */
void sha256_be16(struct sha256_ctx *ctx, uint16_t v)
{
    beint16_t bev = cpu_to_be16(v);
    sha256_update(ctx, &bev, sizeof(bev));
}

void sha256_be32(struct sha256_ctx *ctx, uint32_t v)
{
    beint32_t bev = cpu_to_be32(v);
    sha256_update(ctx, &bev, sizeof(bev));
}

void sha256_be64(struct sha256_ctx *ctx, uint64_t v)
{
    beint64_t bev = cpu_to_be64(v);
    sha256_update(ctx, &bev, sizeof(bev));
}
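A minimal sketch of how the module above is driven, using only functions it defines (the one-shot sha256() helper and the init/update/done trio); the event-serialization string is illustrative only, and sha256.h is assumed to be on the include path.

#include <stdio.h>
#include <string.h>
#include "sha256.h"

int main(void) {
    const char *msg = "[0,\"pubkey\",1700000000,1,[],\"hello nostr\"]";
    struct sha256 digest;

    // One-shot helper defined near the bottom of the file above.
    sha256(&digest, msg, strlen(msg));

    // Equivalent incremental usage.
    struct sha256_ctx ctx;
    sha256_init(&ctx);
    sha256_update(&ctx, msg, strlen(msg));
    sha256_done(&ctx, &digest);

    // Print the 32-byte digest as hex.
    for (size_t i = 0; i < sizeof(digest.u.u8); i++)
        printf("%02x", digest.u.u8[i]);
    printf("\n");
    return 0;
}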
@@ -1,14 +0,0 @@

#ifndef PROTOVERSE_TYPEDEFS_H
#define PROTOVERSE_TYPEDEFS_H

#include <stdint.h>

typedef unsigned char u8;
typedef unsigned int u32;
typedef unsigned short u16;
typedef uint64_t u64;
typedef int64_t s64;


#endif /* PROTOVERSE_TYPEDEFS_H */
@@ -1179,7 +1179,7 @@ static INLINE int parse_i64(struct cursor *read, uint64_t *val)
    shift = 0;

    do {
        if (!pull_byte(read, &byte))
        if (!cursor_pull_byte(read, &byte))
            return 0;
        *val |= (byte & 0x7FULL) << shift;
        shift += 7;
@@ -1199,7 +1199,7 @@ static INLINE int uleb128_read(struct cursor *read, unsigned int *val)
    *val = 0;

    for (;;) {
        if (!pull_byte(read, &byte))
        if (!cursor_pull_byte(read, &byte))
            return 0;

        *val |= (0x7F & byte) << shift;
@@ -1222,7 +1222,7 @@ static INLINE int sleb128_read(struct cursor *read, signed int *val)
    shift = 0;

    do {
        if (!pull_byte(read, &byte))
        if (!cursor_pull_byte(read, &byte))
            return 0;
        *val |= ((byte & 0x7F) << shift);
        shift += 7;
@@ -1241,21 +1241,21 @@ static INLINE int uleb128_read(struct cursor *read, unsigned int *val)
    unsigned char p[6] = {0};
    *val = 0;

    if (pull_byte(read, &p[0]) && (p[0] & 0x80) == 0) {
    if (cursor_pull_byte(read, &p[0]) && (p[0] & 0x80) == 0) {
        *val = LEB128_1(unsigned int);
        if (p[0] == 0x7F)
            assert((int)*val == -1);
        return 1;
    } else if (pull_byte(read, &p[1]) && (p[1] & 0x80) == 0) {
    } else if (cursor_pull_byte(read, &p[1]) && (p[1] & 0x80) == 0) {
        *val = LEB128_2(unsigned int);
        return 2;
    } else if (pull_byte(read, &p[2]) && (p[2] & 0x80) == 0) {
    } else if (cursor_pull_byte(read, &p[2]) && (p[2] & 0x80) == 0) {
        *val = LEB128_3(unsigned int);
        return 3;
    } else if (pull_byte(read, &p[3]) && (p[3] & 0x80) == 0) {
    } else if (cursor_pull_byte(read, &p[3]) && (p[3] & 0x80) == 0) {
        *val = LEB128_4(unsigned int);
        return 4;
    } else if (pull_byte(read, &p[4]) && (p[4] & 0x80) == 0) {
    } else if (cursor_pull_byte(read, &p[4]) && (p[4] & 0x80) == 0) {
        if (!(p[4] & 0xF0)) {
            *val = LEB128_5(unsigned int);
            return 5;
@@ -1296,7 +1296,7 @@ static int parse_section_tag(struct cursor *cur, enum section_tag *section)

    start = cur->p;

    if (!pull_byte(cur, &byte)) {
    if (!cursor_pull_byte(cur, &byte)) {
        return 0;
    }

@@ -1315,7 +1315,7 @@ static int parse_valtype(struct wasm_parser *p, enum valtype *valtype)

    start = p->cur.p;

    if (unlikely(!pull_byte(&p->cur, (unsigned char*)valtype))) {
    if (unlikely(!cursor_pull_byte(&p->cur, (unsigned char*)valtype))) {
        return parse_err(p, "valtype tag oob");
    }

@@ -1416,7 +1416,7 @@ static int parse_export_desc(struct wasm_parser *p, enum exportdesc *desc)
{
    unsigned char byte;

    if (!pull_byte(&p->cur, &byte)) {
    if (!cursor_pull_byte(&p->cur, &byte)) {
        parse_err(p, "export desc byte eof");
        return 0;
    }
@@ -1523,7 +1523,7 @@ static int parse_name_subsection(struct wasm_parser *p, struct namesec *sec, u32
    u8 tag;
    u8 *start = p->cur.p;

    if (!pull_byte(&p->cur, &tag))
    if (!cursor_pull_byte(&p->cur, &tag))
        return parse_err(p, "name subsection tag oob?");

    if (!is_valid_name_subsection(tag))
@@ -1676,7 +1676,7 @@ static int parse_reftype(struct wasm_parser *p, enum reftype *reftype)
{
    u8 tag;

    if (!pull_byte(&p->cur, &tag)) {
    if (!cursor_pull_byte(&p->cur, &tag)) {
        parse_err(p, "reftype");
        return 0;
    }
@@ -1720,7 +1720,7 @@ static int parse_export_section(struct wasm_parser *p,
static int parse_limits(struct wasm_parser *p, struct limits *limits)
{
    unsigned char tag;
    if (!pull_byte(&p->cur, &tag)) {
    if (!cursor_pull_byte(&p->cur, &tag)) {
        return parse_err(p, "oob");
    }

@@ -1803,7 +1803,7 @@ static void print_code(u8 *code, int code_len)
    make_cursor(code, code + code_len, &c);

    for (;;) {
        if (!pull_byte(&c, &tag)) {
        if (!cursor_pull_byte(&c, &tag)) {
            break;
        }

@@ -2169,7 +2169,7 @@ static int parse_const_expr(struct expr_parser *p, struct expr *expr)
    expr->code = p->code->p;

    while (1) {
        if (unlikely(!pull_byte(p->code, &tag))) {
        if (unlikely(!cursor_pull_byte(p->code, &tag))) {
            return note_error(p->errs, p->code, "oob");
        }

@@ -2332,7 +2332,7 @@ static int parse_instrs_until_at(struct expr_parser *p, u8 stop_instr,
        p->code->p - p->code->start,
        dbg_inst, instr_name(stop_instr));
    for (;;) {
        if (!pull_byte(p->code, &tag))
        if (!cursor_pull_byte(p->code, &tag))
            return note_error(p->errs, p->code, "oob");

        if ((tag != i_if && tag == stop_instr) ||
@@ -2413,7 +2413,7 @@ static int parse_element(struct wasm_parser *p, struct elem *elem)

    make_expr_parser(&p->errs, &p->cur, &expr_parser);

    if (!pull_byte(&p->cur, &tag))
    if (!cursor_pull_byte(&p->cur, &tag))
        return parse_err(p, "tag");

    if (tag > 7)
@@ -2545,7 +2545,7 @@ static int parse_wdata(struct wasm_parser *p, struct wdata *data)
    struct expr_parser parser;
    u8 tag;

    if (!pull_byte(&p->cur, &tag)) {
    if (!cursor_pull_byte(&p->cur, &tag)) {
        return parse_err(p, "tag");
    }

@@ -2700,7 +2700,7 @@ static int parse_importdesc(struct wasm_parser *p, struct importdesc *desc)
{
    u8 tag;

    if (!pull_byte(&p->cur, &tag)) {
    if (!cursor_pull_byte(&p->cur, &tag)) {
        parse_err(p, "oom");
        return 0;
    }
@@ -4134,7 +4134,7 @@ static int parse_blocktype(struct cursor *cur, struct errors *errs, struct block
{
    unsigned char byte;

    if (unlikely(!pull_byte(cur, &byte))) {
    if (unlikely(!cursor_pull_byte(cur, &byte))) {
        return note_error(errs, cur, "parse_blocktype: oob\n");
    }

@@ -4656,7 +4656,7 @@ static int parse_bulk_op(struct cursor *code, struct errors *errs,
{
    u8 tag;

    if (unlikely(!pull_byte(code, &tag)))
    if (unlikely(!cursor_pull_byte(code, &tag)))
        return note_error(errs, code, "oob");

    if (unlikely(tag < 10 || tag > 17))
@@ -6552,7 +6552,7 @@ static INLINE int interp_parse_instr(struct wasm_interp *interp,
{
    u8 tag;

    if (unlikely(!pull_byte(code, &tag))) {
    if (unlikely(!cursor_pull_byte(code, &tag))) {
        return interp_error(interp, "no more instrs to pull");
    }


@@ -27,6 +27,8 @@ static const unsigned char WASM_MAGIC[] = {0,'a','s','m'};
#define interp_error(p, fmt, ...) note_error(&((p)->errors), interp_codeptr(p), fmt, ##__VA_ARGS__)
#define parse_err(p, fmt, ...) note_error(&((p)->errs), &(p)->cur, fmt, ##__VA_ARGS__)

#include "short_types.h"

enum valtype {
    val_i32 = 0x7F,
    val_i64 = 0x7E,
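For context, the hunks above rename pull_byte to cursor_pull_byte inside the WebAssembly parser's LEB128 readers. The standalone decoder below illustrates the unsigned LEB128 varint scheme those readers consume; it is a generic sketch, not the project's implementation.

#include <stddef.h>
#include <stdint.h>

// Returns the number of bytes consumed, or 0 if the buffer ends mid-value.
static size_t uleb128_decode(const uint8_t *p, size_t len, uint32_t *val)
{
    uint32_t result = 0;
    unsigned shift = 0;

    for (size_t i = 0; i < len && shift < 32; i++) {
        result |= (uint32_t)(p[i] & 0x7F) << shift;   // low 7 bits carry data
        if ((p[i] & 0x80) == 0) {                     // high bit clear: last byte
            *val = result;
            return i + 1;
        }
        shift += 7;
    }
    return 0;
}

// Example: the bytes 0xE5 0x8E 0x26 decode to 624485.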
File diff suppressed because it is too large
@@ -25,6 +25,23 @@ enum AppAccessibilityIdentifiers: String {
    case sign_in_confirm_button


    // MARK: Sign Up / Create Account
    // Prefix: `sign_up`

    /// Button to navigate to create account view
    case sign_up_option_button
    /// Text field for entering name during account creation
    case sign_up_name_field
    /// Text field for entering bio during account creation
    case sign_up_bio_field
    /// Button to proceed to the next step after entering profile info
    case sign_up_next_button
    /// Button to save keys after account creation
    case sign_up_save_keys_button
    /// Button to skip saving keys
    case sign_up_skip_save_keys_button


    // MARK: Onboarding
    // Prefix: `onboarding`

@@ -43,9 +60,22 @@ enum AppAccessibilityIdentifiers: String {

    // MARK: Post composer
    // Prefix: `post_composer`


    /// The cancel post button
    case post_composer_cancel_button

    /// The text view where the user types their note
    case post_composer_text_view

    /// A user result in the mention autocomplete list
    case post_composer_mention_user_result


    // MARK: Post button (FAB)
    // Prefix: `post_button`

    /// The floating action button to create a new post
    case post_button

    // MARK: Main interface layout
    // Prefix: `main`
@@ -60,6 +90,12 @@ enum AppAccessibilityIdentifiers: String {
    /// The profile option in the side menu
    case side_menu_profile_button

    /// The logout button in the side menu
    case side_menu_logout_button

    /// The logout confirmation button in the alert dialog
    case side_menu_logout_confirm_button


    // MARK: Items specific to the user's own profile
    // Prefix: `own_profile`
damus/Assets.xcassets/Illustrations/damooseLabs.imageset/Contents.json (21 lines, vendored, new file)
@@ -0,0 +1,21 @@
{
  "images" : [
    {
      "filename" : "damooseLabs.png",
      "idiom" : "universal",
      "scale" : "1x"
    },
    {
      "idiom" : "universal",
      "scale" : "2x"
    },
    {
      "idiom" : "universal",
      "scale" : "3x"
    }
  ],
  "info" : {
    "author" : "xcode",
    "version" : 1
  }
}
damus/Assets.xcassets/Illustrations/damooseLabs.imageset/damooseLabs.png (binary, vendored, new file, not shown; 2.5 MiB)
@@ -1,15 +0,0 @@
//
//  AlbyGradient.swift
//  damus
//
//  Created by William Casarin on 2023-05-09.
//

import SwiftUI

fileprivate let alby_grad_c1 = hex_col(r: 226, g: 168, b: 122)
fileprivate let alby_grad_c2 = hex_col(r: 249, g: 223, b: 127)
fileprivate let alby_grad = [alby_grad_c2, alby_grad_c1]

let AlbyGradient: LinearGradient =
    LinearGradient(colors: alby_grad, startPoint: .bottomLeading, endPoint: .topTrailing)
@@ -135,6 +135,7 @@ struct ContentView: View {
    @StateObject var navigationCoordinator: NavigationCoordinator = NavigationCoordinator()
    @AppStorage("has_seen_suggested_users") private var hasSeenOnboardingSuggestions = false
    let sub_id = UUID().description
    @State var damusClosingTask: Task<Void, Never>? = nil

    // connect retry timer
    let timer = Timer.publish(every: 1, on: .main, in: .common).autoconnect()
@@ -173,7 +174,7 @@ struct ContentView: View {
            }

        case .home:
            PostingTimelineView(damus_state: damus_state!, home: home, isSideBarOpened: $isSideBarOpened, active_sheet: $active_sheet, headerOffset: $headerOffset)
            PostingTimelineView(damus_state: damus_state!, home: home, homeEvents: home.events, isSideBarOpened: $isSideBarOpened, active_sheet: $active_sheet, headerOffset: $headerOffset)

        case .notifications:
            NotificationsView(state: damus, notifications: home.notifications, subtitle: $menu_subtitle)
@@ -195,6 +196,9 @@ struct ContentView: View {
                }
            }
        }
        .onAppear {
            notify(.display_tabbar(true))
        }

func MaybeReportView(target: ReportTarget) -> some View {
|
||||
@@ -299,16 +303,20 @@ struct ContentView: View {
|
||||
.ignoresSafeArea(.keyboard)
|
||||
.edgesIgnoringSafeArea(hide_bar ? [.bottom] : [])
|
||||
.onAppear() {
|
||||
self.connect()
|
||||
try? AVAudioSession.sharedInstance().setCategory(AVAudioSession.Category.playback, mode: .default, options: .mixWithOthers)
|
||||
setup_notifications()
|
||||
if !hasSeenOnboardingSuggestions || damus_state!.settings.always_show_onboarding_suggestions {
|
||||
active_sheet = .onboardingSuggestions
|
||||
hasSeenOnboardingSuggestions = true
|
||||
}
|
||||
self.appDelegate?.state = damus_state
|
||||
Task { // We probably don't need this to be a detached task. According to https://docs.swift.org/swift-book/documentation/the-swift-programming-language/concurrency/#Defining-and-Calling-Asynchronous-Functions, awaits are only suspension points that do not block the thread.
|
||||
await self.listenAndHandleLocalNotifications()
|
||||
Task {
|
||||
await self.connect()
|
||||
try? AVAudioSession.sharedInstance().setCategory(AVAudioSession.Category.playback, mode: .default, options: .mixWithOthers)
|
||||
setup_notifications()
|
||||
if !hasSeenOnboardingSuggestions || damus_state!.settings.always_show_onboarding_suggestions {
|
||||
if damus_state.is_privkey_user {
|
||||
active_sheet = .onboardingSuggestions
|
||||
hasSeenOnboardingSuggestions = true
|
||||
}
|
||||
}
|
||||
self.appDelegate?.state = damus_state
|
||||
Task { // We probably don't need this to be a detached task. According to https://docs.swift.org/swift-book/documentation/the-swift-programming-language/concurrency/#Defining-and-Calling-Asynchronous-Functions, awaits are only suspension points that do not block the thread.
|
||||
await self.listenAndHandleLocalNotifications()
|
||||
}
|
||||
}
|
||||
}
|
||||
.sheet(item: $active_sheet) { item in
|
||||
@@ -370,7 +378,7 @@ struct ContentView: View {
|
||||
self.hide_bar = !show
|
||||
}
|
||||
.onReceive(timer) { n in
|
||||
self.damus_state?.nostrNetwork.postbox.try_flushing_events()
|
||||
Task{ await self.damus_state?.nostrNetwork.postbox.try_flushing_events() }
|
||||
self.damus_state!.profiles.profile_data(self.damus_state!.pubkey).status.try_expire()
|
||||
}
|
||||
.onReceive(handle_notify(.report)) { target in
|
||||
@@ -381,43 +389,46 @@ struct ContentView: View {
|
||||
self.confirm_mute = true
|
||||
}
|
||||
.onReceive(handle_notify(.attached_wallet)) { nwc in
|
||||
// update the lightning address on our profile when we attach a
|
||||
// wallet with an associated
|
||||
guard let ds = self.damus_state,
|
||||
let lud16 = nwc.lud16,
|
||||
let keypair = ds.keypair.to_full(),
|
||||
let profile_txn = ds.profiles.lookup(id: ds.pubkey),
|
||||
let profile = profile_txn.unsafeUnownedValue,
|
||||
lud16 != profile.lud16 else {
|
||||
return
|
||||
Task {
|
||||
try? await damus_state.nostrNetwork.userRelayList.load() // Reload relay list to apply changes
|
||||
|
||||
// update the lightning address on our profile when we attach a
|
||||
// wallet with an associated
|
||||
guard let ds = self.damus_state,
|
||||
let lud16 = nwc.lud16,
|
||||
let keypair = ds.keypair.to_full(),
|
||||
let profile = try? ds.profiles.lookup(id: ds.pubkey),
|
||||
lud16 != profile.lud16 else {
|
||||
return
|
||||
}
|
||||
|
||||
// clear zapper cache for old lud16
|
||||
if profile.lud16 != nil {
|
||||
// TODO: should this be somewhere else, where we process profile events!?
|
||||
invalidate_zapper_cache(pubkey: keypair.pubkey, profiles: ds.profiles, lnurl: ds.lnurls)
|
||||
}
|
||||
|
||||
let prof = Profile(name: profile.name, display_name: profile.display_name, about: profile.about, picture: profile.picture, banner: profile.banner, website: profile.website, lud06: profile.lud06, lud16: lud16, nip05: profile.nip05, damus_donation: profile.damus_donation, reactions: profile.reactions)
|
||||
|
||||
guard let ev = make_metadata_event(keypair: keypair, metadata: prof) else { return }
|
||||
await ds.nostrNetwork.postbox.send(ev)
|
||||
}
|
||||
|
||||
// clear zapper cache for old lud16
|
||||
if profile.lud16 != nil {
|
||||
// TODO: should this be somewhere else, where we process profile events!?
|
||||
invalidate_zapper_cache(pubkey: keypair.pubkey, profiles: ds.profiles, lnurl: ds.lnurls)
|
||||
}
|
||||
|
||||
let prof = Profile(name: profile.name, display_name: profile.display_name, about: profile.about, picture: profile.picture, banner: profile.banner, website: profile.website, lud06: profile.lud06, lud16: lud16, nip05: profile.nip05, damus_donation: profile.damus_donation, reactions: profile.reactions)
|
||||
|
||||
guard let ev = make_metadata_event(keypair: keypair, metadata: prof) else { return }
|
||||
ds.nostrNetwork.postbox.send(ev)
|
||||
}
|
||||
.onReceive(handle_notify(.broadcast)) { ev in
|
||||
guard let ds = self.damus_state else { return }
|
||||
|
||||
ds.nostrNetwork.postbox.send(ev)
|
||||
Task { await ds.nostrNetwork.postbox.send(ev) }
|
||||
}
|
||||
.onReceive(handle_notify(.unfollow)) { target in
|
||||
guard let state = self.damus_state else { return }
|
||||
_ = handle_unfollow(state: state, unfollow: target.follow_ref)
|
||||
Task { _ = await handle_unfollow(state: state, unfollow: target.follow_ref) }
|
||||
}
|
||||
.onReceive(handle_notify(.unfollowed)) { unfollow in
|
||||
home.resubscribe(.unfollowing(unfollow))
|
||||
}
|
||||
.onReceive(handle_notify(.follow)) { target in
|
||||
guard let state = self.damus_state else { return }
|
||||
handle_follow_notif(state: state, target: target)
|
||||
Task { await handle_follow_notif(state: state, target: target) }
|
||||
}
|
||||
.onReceive(handle_notify(.followed)) { _ in
|
||||
home.resubscribe(.following)
|
||||
@@ -428,8 +439,10 @@ struct ContentView: View {
|
||||
return
|
||||
}
|
||||
|
||||
if !handle_post_notification(keypair: keypair, postbox: state.nostrNetwork.postbox, events: state.events, post: post) {
|
||||
self.active_sheet = nil
|
||||
Task {
|
||||
if await !handle_post_notification(keypair: keypair, postbox: state.nostrNetwork.postbox, events: state.events, post: post) {
|
||||
self.active_sheet = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
.onReceive(handle_notify(.new_mutes)) { _ in
|
||||
@@ -447,6 +460,9 @@ struct ContentView: View {
|
||||
.onReceive(handle_notify(.present_full_screen_item)) { item in
|
||||
self.active_full_screen_item = item
|
||||
}
|
||||
.onReceive(handle_notify(.favoriteUpdated)) { _ in
|
||||
home.resubscribe(.following)
|
||||
}
|
||||
.onReceive(handle_notify(.zapping)) { zap_ev in
|
||||
guard !zap_ev.is_custom else {
|
||||
return
|
||||
@@ -472,35 +488,35 @@ struct ContentView: View {
|
||||
}
|
||||
}
|
||||
.onReceive(handle_notify(.disconnect_relays)) { () in
|
||||
damus_state.nostrNetwork.pool.disconnect()
|
||||
Task { await damus_state.nostrNetwork.disconnectRelays() }
|
||||
}
|
||||
.onReceive(NotificationCenter.default.publisher(for: UIApplication.willEnterForegroundNotification)) { obj in
|
||||
print("txn: 📙 DAMUS ACTIVE NOTIFY")
|
||||
if damus_state.ndb.reopen() {
|
||||
print("txn: NOSTRDB REOPENED")
|
||||
} else {
|
||||
print("txn: NOSTRDB FAILED TO REOPEN closed:\(damus_state.ndb.is_closed)")
|
||||
}
|
||||
if damus_state.purple.checkout_ids_in_progress.count > 0 {
|
||||
// For extra assurance, run this after one second, to avoid race conditions if the app is also handling a damus purple welcome url.
|
||||
DispatchQueue.main.asyncAfter(deadline: .now() + 1) {
|
||||
Task {
|
||||
let freshly_completed_checkout_ids = try? await damus_state.purple.check_status_of_checkouts_in_progress()
|
||||
let there_is_a_completed_checkout: Bool = (freshly_completed_checkout_ids?.count ?? 0) > 0
|
||||
let account_info = try await damus_state.purple.fetch_account(pubkey: self.keypair.pubkey)
|
||||
if there_is_a_completed_checkout == true && account_info?.active == true {
|
||||
if damus_state.purple.onboarding_status.user_has_never_seen_the_onboarding_before() {
|
||||
// Show welcome sheet
|
||||
self.active_sheet = .purple_onboarding
|
||||
}
|
||||
else {
|
||||
self.active_sheet = .purple(DamusPurpleURL.init(is_staging: damus_state.purple.environment == .staging, variant: .landing))
|
||||
Task {
|
||||
if damus_state.ndb.reopen() {
|
||||
print("txn: NOSTRDB REOPENED")
|
||||
} else {
|
||||
print("txn: NOSTRDB FAILED TO REOPEN closed:\(damus_state.ndb.is_closed)")
|
||||
}
|
||||
if damus_state.purple.checkout_ids_in_progress.count > 0 {
|
||||
// For extra assurance, run this after one second, to avoid race conditions if the app is also handling a damus purple welcome url.
|
||||
DispatchQueue.main.asyncAfter(deadline: .now() + 1) {
|
||||
Task {
|
||||
let freshly_completed_checkout_ids = try? await damus_state.purple.check_status_of_checkouts_in_progress()
|
||||
let there_is_a_completed_checkout: Bool = (freshly_completed_checkout_ids?.count ?? 0) > 0
|
||||
let account_info = try await damus_state.purple.fetch_account(pubkey: self.keypair.pubkey)
|
||||
if there_is_a_completed_checkout == true && account_info?.active == true {
|
||||
if damus_state.purple.onboarding_status.user_has_never_seen_the_onboarding_before() {
|
||||
// Show welcome sheet
|
||||
self.active_sheet = .purple_onboarding
|
||||
}
|
||||
else {
|
||||
self.active_sheet = .purple(DamusPurpleURL.init(is_staging: damus_state.purple.environment == .staging, variant: .landing))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Task {
|
||||
await damus_state.purple.check_and_send_app_notifications_if_needed(handler: home.handle_damus_app_notification)
|
||||
}
|
||||
}
|
||||
@@ -509,8 +525,21 @@ struct ContentView: View {
|
||||
switch phase {
|
||||
case .background:
|
||||
print("txn: 📙 DAMUS BACKGROUNDED")
|
||||
Task { @MainActor in
|
||||
damus_state.ndb.close()
|
||||
let bgTask = this_app.beginBackgroundTask(withName: "Closing things down gracefully", expirationHandler: { [weak damus_state] in
|
||||
})
|
||||
|
||||
damusClosingTask = Task { @MainActor in
|
||||
Log.debug("App background signal handling: App being backgrounded", for: .app_lifecycle)
|
||||
let startTime = CFAbsoluteTimeGetCurrent()
|
||||
|
||||
// Stop periodic snapshots
|
||||
await damus_state.snapshotManager.stopPeriodicSnapshots()
|
||||
|
||||
await damus_state.nostrNetwork.handleAppBackgroundRequest() // Close ndb streaming tasks before closing ndb to avoid memory errors
|
||||
|
||||
Log.debug("App background signal handling: Nostr network manager closed after %.2f seconds", for: .app_lifecycle, CFAbsoluteTimeGetCurrent() - startTime)
|
||||
|
||||
this_app.endBackgroundTask(bgTask)
|
||||
}
|
||||
break
|
||||
case .inactive:
|
||||
@@ -518,26 +547,34 @@ struct ContentView: View {
|
||||
break
|
||||
case .active:
|
||||
print("txn: 📙 DAMUS ACTIVE")
|
||||
damus_state.nostrNetwork.pool.ping()
|
||||
Task {
|
||||
await damusClosingTask?.value // Wait for the closing task to finish before reopening things, to avoid race conditions
|
||||
damusClosingTask = nil
|
||||
await damus_state.nostrNetwork.handleAppForegroundRequest()
|
||||
|
||||
// Restart periodic snapshots when returning to foreground
|
||||
await damus_state.snapshotManager.startPeriodicSnapshots()
|
||||
}
|
||||
@unknown default:
|
||||
break
|
||||
}
|
||||
}
|
||||
.onReceive(handle_notify(.onlyzaps_mode)) { hide in
|
||||
home.filter_events()
|
||||
|
||||
guard let ds = damus_state,
|
||||
let profile_txn = ds.profiles.lookup(id: ds.pubkey),
|
||||
let profile = profile_txn.unsafeUnownedValue,
|
||||
let keypair = ds.keypair.to_full()
|
||||
else {
|
||||
return
|
||||
Task {
|
||||
home.filter_events()
|
||||
|
||||
guard let ds = damus_state,
|
||||
let profile = try? ds.profiles.lookup(id: ds.pubkey),
|
||||
let keypair = ds.keypair.to_full()
|
||||
else {
|
||||
return
|
||||
}
|
||||
|
||||
let prof = Profile(name: profile.name, display_name: profile.display_name, about: profile.about, picture: profile.picture, banner: profile.banner, website: profile.website, lud06: profile.lud06, lud16: profile.lud16, nip05: profile.nip05, damus_donation: profile.damus_donation, reactions: !hide)
|
||||
|
||||
guard let profile_ev = make_metadata_event(keypair: keypair, metadata: prof) else { return }
|
||||
await ds.nostrNetwork.postbox.send(profile_ev)
|
||||
}
|
||||
|
||||
let prof = Profile(name: profile.name, display_name: profile.display_name, about: profile.about, picture: profile.picture, banner: profile.banner, website: profile.website, lud06: profile.lud06, lud16: profile.lud16, nip05: profile.nip05, damus_donation: profile.damus_donation, reactions: !hide)
|
||||
|
||||
guard let profile_ev = make_metadata_event(keypair: keypair, metadata: prof) else { return }
|
||||
ds.nostrNetwork.postbox.send(profile_ev)
|
||||
}
|
||||
.alert(NSLocalizedString("User muted", comment: "Alert message to indicate the user has been muted"), isPresented: $user_muted_confirm, actions: {
|
||||
Button(NSLocalizedString("Thanks!", comment: "Button to close out of alert that informs that the action to muted a user was successful.")) {
|
||||
@@ -545,8 +582,7 @@ struct ContentView: View {
|
||||
}
|
||||
}, message: {
|
||||
if case let .user(pubkey, _) = self.muting {
|
||||
let profile_txn = damus_state!.profiles.lookup(id: pubkey)
|
||||
let profile = profile_txn?.unsafeUnownedValue
|
||||
let profile = try? damus_state!.profiles.lookup(id: pubkey)
|
||||
let name = Profile.displayName(profile: profile, pubkey: pubkey).username.truncate(maxLength: 50)
|
||||
Text("\(name) has been muted", comment: "Alert message that informs a user was muted.")
|
||||
} else {
|
||||
@@ -560,20 +596,22 @@ struct ContentView: View {
|
||||
}
|
||||
|
||||
Button(NSLocalizedString("Yes, Overwrite", comment: "Text of button that confirms to overwrite the existing mutelist.")) {
|
||||
guard let ds = damus_state,
|
||||
let keypair = ds.keypair.to_full(),
|
||||
let muting,
|
||||
let mutelist = create_or_update_mutelist(keypair: keypair, mprev: nil, to_add: muting)
|
||||
else {
|
||||
return
|
||||
Task {
|
||||
guard let ds = damus_state,
|
||||
let keypair = ds.keypair.to_full(),
|
||||
let muting,
|
||||
let mutelist = create_or_update_mutelist(keypair: keypair, mprev: nil, to_add: muting)
|
||||
else {
|
||||
return
|
||||
}
|
||||
|
||||
ds.mutelist_manager.set_mutelist(mutelist)
|
||||
await ds.nostrNetwork.postbox.send(mutelist)
|
||||
|
||||
confirm_overwrite_mutelist = false
|
||||
confirm_mute = false
|
||||
user_muted_confirm = true
|
||||
}
|
||||
|
||||
ds.mutelist_manager.set_mutelist(mutelist)
|
||||
ds.nostrNetwork.postbox.send(mutelist)
|
||||
|
||||
confirm_overwrite_mutelist = false
|
||||
confirm_mute = false
|
||||
user_muted_confirm = true
|
||||
}
|
||||
}, message: {
|
||||
Text("No mute list found, create a new one? This will overwrite any previous mute lists.", comment: "Alert message prompt that asks if the user wants to create a new mute list, overwriting previous mute lists.")
|
||||
@@ -587,6 +625,10 @@ struct ContentView: View {
|
||||
return
|
||||
}
|
||||
|
||||
if ds.mutelist_manager.event == nil {
|
||||
home.load_latest_mutelist_event_from_damus_state()
|
||||
}
|
||||
|
||||
if ds.mutelist_manager.event == nil {
|
||||
confirm_overwrite_mutelist = true
|
||||
} else {
|
||||
@@ -601,13 +643,12 @@ struct ContentView: View {
|
||||
}
|
||||
|
||||
ds.mutelist_manager.set_mutelist(ev)
|
||||
ds.nostrNetwork.postbox.send(ev)
|
||||
Task { await ds.nostrNetwork.postbox.send(ev) }
|
||||
}
|
||||
}
|
||||
}, message: {
|
||||
if case let .user(pubkey, _) = muting {
|
||||
let profile_txn = damus_state?.profiles.lookup(id: pubkey)
|
||||
let profile = profile_txn?.unsafeUnownedValue
|
||||
let profile = try? damus_state?.profiles.lookup(id: pubkey)
|
||||
let name = Profile.displayName(profile: profile, pubkey: pubkey).username.truncate(maxLength: 50)
|
||||
Text("Mute \(name)?", comment: "Alert message prompt to ask if a user should be muted.")
|
||||
} else {
|
||||
@@ -653,7 +694,7 @@ struct ContentView: View {
|
||||
self.execute_open_action(openAction)
|
||||
}
|
||||
|
||||
func connect() {
|
||||
func connect() async {
|
||||
// nostrdb
|
||||
var mndb = Ndb()
|
||||
if mndb == nil {
|
||||
@@ -675,12 +716,13 @@ struct ContentView: View {
|
||||
|
||||
let settings = UserSettingsStore.globally_load_for(pubkey: pubkey)
|
||||
|
||||
let new_relay_filters = load_relay_filters(pubkey) == nil
|
||||
let new_relay_filters = await load_relay_filters(pubkey) == nil
|
||||
|
||||
self.damus_state = DamusState(keypair: keypair,
|
||||
likes: EventCounter(our_pubkey: pubkey),
|
||||
boosts: EventCounter(our_pubkey: pubkey),
|
||||
contacts: Contacts(our_pubkey: pubkey),
|
||||
contactCards: ContactCardManager(),
|
||||
mutelist_manager: MutelistManager(user_keypair: keypair),
|
||||
profiles: Profiles(ndb: ndb),
|
||||
dms: home.dms,
|
||||
@@ -706,6 +748,8 @@ struct ContentView: View {
|
||||
|
||||
home.damus_state = self.damus_state!
|
||||
|
||||
await damus_state.snapshotManager.startPeriodicSnapshots()
|
||||
|
||||
if let damus_state, damus_state.purple.enable_purple {
|
||||
// Assign delegate so that we can send receipts to the Purple API server as soon as we get updates from user's purchases
|
||||
StoreObserver.standard.delegate = damus_state.purple
|
||||
@@ -717,8 +761,7 @@ struct ContentView: View {
|
||||
// Purple API is an experimental feature. If not enabled, do not connect `StoreObserver` with Purple API to avoid leaking receipts
|
||||
}
|
||||
|
||||
damus_state.nostrNetwork.pool.register_handler(sub_id: sub_id, handler: home.handle_event)
|
||||
damus_state.nostrNetwork.connect()
|
||||
|
||||
|
||||
if #available(iOS 17, *) {
|
||||
if damus_state.settings.developer_mode && damus_state.settings.reset_tips_on_launch {
|
||||
@@ -734,29 +777,36 @@ struct ContentView: View {
|
||||
Log.error("Failed to configure tips: %s", for: .tips, error.localizedDescription)
|
||||
}
|
||||
}
|
||||
await damus_state.nostrNetwork.connect()
|
||||
// TODO: Move this to a better spot. Not sure what is the best signal to listen to for sending initial filters
|
||||
DispatchQueue.main.asyncAfter(deadline: .now() + 0.25, execute: {
|
||||
self.home.send_initial_filters()
|
||||
})
|
||||
}
|
||||
|
||||
func music_changed(_ state: MusicState) {
|
||||
guard let damus_state else { return }
|
||||
switch state {
|
||||
case .playback_state:
|
||||
break
|
||||
case .song(let song):
|
||||
guard let song, let kp = damus_state.keypair.to_full() else { return }
|
||||
|
||||
let pdata = damus_state.profiles.profile_data(damus_state.pubkey)
|
||||
|
||||
let desc = "\(song.title ?? "Unknown") - \(song.artist ?? "Unknown")"
|
||||
let encodedDesc = desc.addingPercentEncoding(withAllowedCharacters: .urlQueryAllowed)
|
||||
let url = encodedDesc.flatMap { enc in
|
||||
URL(string: "spotify:search:\(enc)")
|
||||
Task {
|
||||
guard let damus_state else { return }
|
||||
switch state {
|
||||
case .playback_state:
|
||||
break
|
||||
case .song(let song):
|
||||
guard let song, let kp = damus_state.keypair.to_full() else { return }
|
||||
|
||||
let pdata = damus_state.profiles.profile_data(damus_state.pubkey)
|
||||
|
||||
let desc = "\(song.title ?? "Unknown") - \(song.artist ?? "Unknown")"
|
||||
let encodedDesc = desc.addingPercentEncoding(withAllowedCharacters: .urlQueryAllowed)
|
||||
let url = encodedDesc.flatMap { enc in
|
||||
URL(string: "spotify:search:\(enc)")
|
||||
}
|
||||
let music = UserStatus(type: .music, expires_at: Date.now.addingTimeInterval(song.playbackDuration), content: desc, created_at: UInt32(Date.now.timeIntervalSince1970), url: url)
|
||||
|
||||
pdata.status.music = music
|
||||
|
||||
guard let ev = music.to_note(keypair: kp) else { return }
|
||||
await damus_state.nostrNetwork.postbox.send(ev)
|
||||
}
|
||||
let music = UserStatus(type: .music, expires_at: Date.now.addingTimeInterval(song.playbackDuration), content: desc, created_at: UInt32(Date.now.timeIntervalSince1970), url: url)
|
||||
|
||||
pdata.status.music = music
|
||||
|
||||
guard let ev = music.to_note(keypair: kp) else { return }
|
||||
damus_state.nostrNetwork.postbox.send(ev)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -806,7 +856,7 @@ struct TopbarSideMenuButton: View {
|
||||
Button {
|
||||
isSideBarOpened.toggle()
|
||||
} label: {
|
||||
ProfilePicView(pubkey: damus_state.pubkey, size: 32, highlight: .none, profiles: damus_state.profiles, disable_animation: damus_state.settings.disable_animation)
|
||||
ProfilePicView(pubkey: damus_state.pubkey, size: 32, highlight: .none, profiles: damus_state.profiles, disable_animation: damus_state.settings.disable_animation, damusState: damus_state)
|
||||
.opacity(isSideBarOpened ? 0 : 1)
|
||||
.animation(isSideBarOpened ? .none : .default, value: isSideBarOpened)
|
||||
.accessibilityHidden(true) // Knowing there is a profile picture here leads to no actionable outcome to VoiceOver users, so it is best not to show it
|
||||
@@ -908,7 +958,7 @@ func update_filters_with_since(last_of_kind: [UInt32: NostrEvent], filters: [Nos
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@MainActor
|
||||
func setup_notifications() {
|
||||
this_app.registerForRemoteNotifications()
|
||||
let center = UNUserNotificationCenter.current()
|
||||
@@ -943,169 +993,11 @@ enum FindEventType {
|
||||
}
|
||||
|
||||
enum FoundEvent {
|
||||
// TODO: Why not return the profile record itself? Right now the code probably just wants to trigger ndb to ingest the profile record and be available at ndb in parallel, but it would be cleaner if the function that uses this simply does that ndb query on their behalf.
|
||||
case profile(Pubkey)
|
||||
case event(NostrEvent)
|
||||
}
|
||||
|
||||
/// Finds an event from NostrDB if it exists, or from the network
|
||||
///
|
||||
/// This is the callback version. There is also an asyc/await version of this function.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - state: Damus state
|
||||
/// - query_: The query, including the event being looked for, and the relays to use when looking
|
||||
/// - callback: The function to call with results
|
||||
func find_event(state: DamusState, query query_: FindEvent, callback: @escaping (FoundEvent?) -> ()) {
|
||||
return find_event_with_subid(state: state, query: query_, subid: UUID().description, callback: callback)
|
||||
}
|
||||
|
||||
/// Finds an event from NostrDB if it exists, or from the network
|
||||
///
|
||||
/// This is a the async/await version of `find_event`. Use this when using callbacks is impossible or cumbersome.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - state: Damus state
|
||||
/// - query_: The query, including the event being looked for, and the relays to use when looking
|
||||
/// - callback: The function to call with results
|
||||
func find_event(state: DamusState, query query_: FindEvent) async -> FoundEvent? {
|
||||
await withCheckedContinuation { continuation in
|
||||
find_event(state: state, query: query_) { event in
|
||||
var already_resumed = false
|
||||
if !already_resumed { // Ensure we do not resume twice, as it causes a crash
|
||||
continuation.resume(returning: event)
|
||||
already_resumed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func find_event_with_subid(state: DamusState, query query_: FindEvent, subid: String, callback: @escaping (FoundEvent?) -> ()) {
|
||||
|
||||
var filter: NostrFilter? = nil
|
||||
let find_from = query_.find_from
|
||||
let query = query_.type
|
||||
|
||||
switch query {
|
||||
case .profile(let pubkey):
|
||||
if let profile_txn = state.ndb.lookup_profile(pubkey),
|
||||
let record = profile_txn.unsafeUnownedValue,
|
||||
record.profile != nil
|
||||
{
|
||||
callback(.profile(pubkey))
|
||||
return
|
||||
}
|
||||
filter = NostrFilter(kinds: [.metadata], limit: 1, authors: [pubkey])
|
||||
|
||||
case .event(let evid):
|
||||
if let ev = state.events.lookup(evid) {
|
||||
callback(.event(ev))
|
||||
return
|
||||
}
|
||||
|
||||
filter = NostrFilter(ids: [evid], limit: 1)
|
||||
}
|
||||
|
||||
var attempts: Int = 0
|
||||
var has_event = false
|
||||
guard let filter else { return }
|
||||
|
||||
state.nostrNetwork.pool.subscribe_to(sub_id: subid, filters: [filter], to: find_from) { relay_id, res in
|
||||
guard case .nostr_event(let ev) = res else {
|
||||
return
|
||||
}
|
||||
|
||||
guard ev.subid == subid else {
|
||||
return
|
||||
}
|
||||
|
||||
switch ev {
|
||||
case .ok:
|
||||
break
|
||||
case .event(_, let ev):
|
||||
has_event = true
|
||||
state.nostrNetwork.pool.unsubscribe(sub_id: subid)
|
||||
|
||||
switch query {
|
||||
case .profile:
|
||||
if ev.known_kind == .metadata {
|
||||
callback(.profile(ev.pubkey))
|
||||
}
|
||||
case .event:
|
||||
callback(.event(ev))
|
||||
}
|
||||
case .eose:
|
||||
if !has_event {
|
||||
attempts += 1
|
||||
if attempts >= state.nostrNetwork.pool.our_descriptors.count {
|
||||
callback(nil) // If we could not find any events in any of the relays we are connected to, send back nil
|
||||
}
|
||||
}
|
||||
state.nostrNetwork.pool.unsubscribe(sub_id: subid, to: [relay_id]) // We are only finding an event once, so close subscription on eose
|
||||
case .notice:
|
||||
break
|
||||
case .auth:
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Finds a replaceable event based on an `naddr` address.
|
||||
///
|
||||
/// This is the callback version of the function. There is another function that makes use of async/await
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - damus_state: The Damus state
|
||||
/// - naddr: the `naddr` address
|
||||
/// - callback: A function to handle the found event
|
||||
func naddrLookup(damus_state: DamusState, naddr: NAddr, callback: @escaping (NostrEvent?) -> ()) {
|
||||
let nostrKinds: [NostrKind]? = NostrKind(rawValue: naddr.kind).map { [$0] }
|
||||
|
||||
let filter = NostrFilter(kinds: nostrKinds, authors: [naddr.author])
|
||||
|
||||
let subid = UUID().description
|
||||
|
||||
damus_state.nostrNetwork.pool.subscribe_to(sub_id: subid, filters: [filter], to: nil) { relay_id, res in
|
||||
guard case .nostr_event(let ev) = res else {
|
||||
damus_state.nostrNetwork.pool.unsubscribe(sub_id: subid, to: [relay_id])
|
||||
return
|
||||
}
|
||||
|
||||
if case .event(_, let ev) = ev {
|
||||
for tag in ev.tags {
|
||||
if(tag.count >= 2 && tag[0].string() == "d"){
|
||||
if (tag[1].string() == naddr.identifier){
|
||||
damus_state.nostrNetwork.pool.unsubscribe(sub_id: subid, to: [relay_id])
|
||||
callback(ev)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
damus_state.nostrNetwork.pool.unsubscribe(sub_id: subid, to: [relay_id])
|
||||
}
|
||||
}
|
||||
|
||||
/// Finds a replaceable event based on an `naddr` address.
|
||||
///
|
||||
/// This is the async/await version of the function. Another version of this function which makes use of callback functions also exists .
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - damus_state: The Damus state
|
||||
/// - naddr: the `naddr` address
|
||||
/// - callback: A function to handle the found event
|
||||
func naddrLookup(damus_state: DamusState, naddr: NAddr) async -> NostrEvent? {
|
||||
await withCheckedContinuation { continuation in
|
||||
var already_resumed = false
|
||||
naddrLookup(damus_state: damus_state, naddr: naddr) { event in
|
||||
if !already_resumed { // Ensure we do not resume twice, as it causes a crash
|
||||
continuation.resume(returning: event)
|
||||
already_resumed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func timeline_name(_ timeline: Timeline?) -> String {
|
||||
guard let timeline else {
|
||||
return ""
|
||||
@@ -1123,14 +1015,15 @@ func timeline_name(_ timeline: Timeline?) -> String {
|
||||
}
|
||||
|
||||
@discardableResult
|
||||
func handle_unfollow(state: DamusState, unfollow: FollowRef) -> Bool {
|
||||
@MainActor
|
||||
func handle_unfollow(state: DamusState, unfollow: FollowRef) async -> Bool {
|
||||
guard let keypair = state.keypair.to_full() else {
|
||||
return false
|
||||
}
|
||||
|
||||
let old_contacts = state.contacts.event
|
||||
|
||||
guard let ev = unfollow_reference(postbox: state.nostrNetwork.postbox, our_contacts: old_contacts, keypair: keypair, unfollow: unfollow)
|
||||
guard let ev = await unfollow_reference(postbox: state.nostrNetwork.postbox, our_contacts: old_contacts, keypair: keypair, unfollow: unfollow)
|
||||
else {
|
||||
return false
|
||||
}
|
||||
@@ -1151,12 +1044,13 @@ func handle_unfollow(state: DamusState, unfollow: FollowRef) -> Bool {
|
||||
}
|
||||
|
||||
@discardableResult
|
||||
func handle_follow(state: DamusState, follow: FollowRef) -> Bool {
|
||||
@MainActor
|
||||
func handle_follow(state: DamusState, follow: FollowRef) async -> Bool {
|
||||
guard let keypair = state.keypair.to_full() else {
|
||||
return false
|
||||
}
|
||||
|
||||
guard let ev = follow_reference(box: state.nostrNetwork.postbox, our_contacts: state.contacts.event, keypair: keypair, follow: follow)
|
||||
guard let ev = await follow_reference(box: state.nostrNetwork.postbox, our_contacts: state.contacts.event, keypair: keypair, follow: follow)
|
||||
else {
|
||||
return false
|
||||
}
|
||||
@@ -1176,18 +1070,18 @@ func handle_follow(state: DamusState, follow: FollowRef) -> Bool {
|
||||
}
|
||||
|
||||
@discardableResult
|
||||
func handle_follow_notif(state: DamusState, target: FollowTarget) -> Bool {
|
||||
func handle_follow_notif(state: DamusState, target: FollowTarget) async -> Bool {
|
||||
switch target {
|
||||
case .pubkey(let pk):
|
||||
state.contacts.add_friend_pubkey(pk)
|
||||
await state.contacts.add_friend_pubkey(pk)
|
||||
case .contact(let ev):
|
||||
state.contacts.add_friend_contact(ev)
|
||||
await state.contacts.add_friend_contact(ev)
|
||||
}
|
||||
|
||||
return handle_follow(state: state, follow: target.follow_ref)
|
||||
return await handle_follow(state: state, follow: target.follow_ref)
|
||||
}
|
||||
|
||||
func handle_post_notification(keypair: FullKeypair, postbox: PostBox, events: EventCache, post: NostrPostResult) -> Bool {
|
||||
func handle_post_notification(keypair: FullKeypair, postbox: PostBox, events: EventCache, post: NostrPostResult) async -> Bool {
|
||||
switch post {
|
||||
case .post(let post):
|
||||
//let post = tup.0
|
||||
@@ -1196,17 +1090,17 @@ func handle_post_notification(keypair: FullKeypair, postbox: PostBox, events: Ev
|
||||
guard let new_ev = post.to_event(keypair: keypair) else {
|
||||
return false
|
||||
}
|
||||
postbox.send(new_ev)
|
||||
await postbox.send(new_ev)
|
||||
for eref in new_ev.referenced_ids.prefix(3) {
|
||||
// also broadcast at most 3 referenced events
|
||||
if let ev = events.lookup(eref) {
|
||||
postbox.send(ev)
|
||||
await postbox.send(ev)
|
||||
}
|
||||
}
|
||||
for qref in new_ev.referenced_quote_ids.prefix(3) {
|
||||
// also broadcast at most 3 referenced quoted events
|
||||
if let ev = events.lookup(qref.note_id) {
|
||||
postbox.send(ev)
|
||||
await postbox.send(ev)
|
||||
}
|
||||
}
|
||||
return true
|
||||
@@ -1220,8 +1114,8 @@ extension LossyLocalNotification {
|
||||
/// Computes a view open action from a mention reference.
|
||||
/// Use this when opening a user-presentable interface to a specific mention reference.
|
||||
func toViewOpenAction() -> ContentView.ViewOpenAction {
|
||||
switch self.mention {
|
||||
case .pubkey(let pubkey):
|
||||
switch self.mention.nip19 {
|
||||
case .npub(let pubkey):
|
||||
return .route(.ProfileByKey(pubkey: pubkey))
|
||||
case .note(let noteId):
|
||||
return .route(.LoadableNostrEvent(note_reference: .note_id(noteId)))
|
||||
@@ -1241,14 +1135,21 @@ extension LossyLocalNotification {
|
||||
)))
|
||||
case .naddr(let nAddr):
|
||||
return .route(.LoadableNostrEvent(note_reference: .naddr(nAddr)))
|
||||
case .nsec(_):
|
||||
// `nsec` urls are a terrible idea security-wise, so we should intentionally not support those — in order to discourage their use.
|
||||
return .sheet(.error(ErrorView.UserPresentableError(
|
||||
user_visible_description: NSLocalizedString("You opened an invalid link. The link you tried to open refers to \"nsec\", which is not supported.", comment: "User-visible error description for a user who tries to open an unsupported \"nsec\" link."),
|
||||
tip: NSLocalizedString("Please contact the person who provided the link, and ask for another link. Also, this link may have sensitive information, please use caution before sharing it.", comment: "User-visible tip on what to do if a link contains an unsupported \"nsec\" reference."),
|
||||
technical_info: "`MentionRef.toViewOpenAction` detected unsupported `nsec` contents"
|
||||
)))
|
||||
case .nscript(let script):
|
||||
return .route(.Script(script: ScriptModel(data: script, state: .not_loaded)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
func logout(_ state: DamusState?)
{
    state?.close()
    notify(.logout)
}


@@ -12,11 +12,11 @@ struct NIP04 {}
extension NIP04 {
    /// Encrypts a message using NIP-04.
    static func encrypt_message(message: String, privkey: Privkey, to_pk: Pubkey, encoding: EncEncoding = .base64) -> String? {
        let iv = random_bytes(count: 16).bytes
        let iv = random_bytes(count: 16).byteArray
        guard let shared_sec = get_shared_secret(privkey: privkey, pubkey: to_pk) else {
            return nil
        }
        let utf8_message = Data(message.utf8).bytes
        let utf8_message = Data(message.utf8).byteArray
        guard let enc_message = aes_encrypt(data: utf8_message, iv: iv, shared_sec: shared_sec) else {
            return nil
        }
@@ -42,6 +42,10 @@ extension NIP65 {
        self.relays = Self.relayOrderedDictionary(from: relays)
    }

    init() {
        self.relays = Self.relayOrderedDictionary(from: [])
    }

    init(relays: [RelayURL]) {
        let relayItemList = relays.map({ RelayItem(url: $0, rwConfiguration: .readWrite) })
        self.relays = Self.relayOrderedDictionary(from: relayItemList)
@@ -0,0 +1,215 @@
|
||||
//
|
||||
// NostrNetworkManager.swift
|
||||
// damus
|
||||
//
|
||||
// Created by Daniel D’Aquino on 2025-02-26.
|
||||
//
|
||||
import Foundation
|
||||
|
||||
/// Manages interactions with the Nostr Network.
|
||||
///
|
||||
/// This delineates a layer responsible for mid-level management of interactions with the Nostr network, controlling lower-level classes that perform more network/DB-specific work, and providing an easier-to-use, more semantic interface for the rest of the app.
|
||||
///
|
||||
/// This is responsible for:
|
||||
/// - Managing the user's relay list
|
||||
/// - Establishing a `RelayPool` and maintaining it in sync with the user's relay list as it changes
|
||||
/// - Abstracting away the complexities of interacting with the Nostr network, providing an easier-to-use interface to fetch and send Nostr content
|
||||
///
|
||||
/// This is **NOT** responsible for:
|
||||
/// - Doing actual storage of the relay list (this is delegated via the delegate)
|
||||
/// - Handling low-level relay logic (this will be delegated to lower level classes used in RelayPool/RelayConnection)
|
||||
class NostrNetworkManager {
|
||||
/// The relay pool that we manage
|
||||
///
|
||||
/// ## Implementation notes
|
||||
///
|
||||
/// - This will be marked `private` in the future to prevent other code from accessing the relay pool directly. Code outside this layer should use a higher level interface
|
||||
private let pool: RelayPool // TODO: Make this private and make higher level interface for classes outside the NostrNetworkManager
|
||||
/// A delegate that allows us to interact with the rest of app without introducing hard or circular dependencies
|
||||
private var delegate: Delegate
|
||||
/// Manages the user's relay list, controls RelayPool's connected relays
|
||||
let userRelayList: UserRelayListManager
|
||||
/// Handles sending out notes to the network
|
||||
let postbox: PostBox
|
||||
/// Handles subscriptions and functions to read or consume data from the Nostr network
|
||||
let reader: SubscriptionManager
|
||||
let profilesManager: ProfilesManager
|
||||
|
||||
init(delegate: Delegate, addNdbToRelayPool: Bool = true) {
|
||||
self.delegate = delegate
|
||||
let pool = RelayPool(ndb: addNdbToRelayPool ? delegate.ndb : nil, keypair: delegate.keypair)
|
||||
self.pool = pool
|
||||
let reader = SubscriptionManager(pool: pool, ndb: delegate.ndb, experimentalLocalRelayModelSupport: self.delegate.experimentalLocalRelayModelSupport)
|
||||
let userRelayList = UserRelayListManager(delegate: delegate, pool: pool, reader: reader)
|
||||
self.reader = reader
|
||||
self.userRelayList = userRelayList
|
||||
self.postbox = PostBox(pool: pool)
|
||||
self.profilesManager = ProfilesManager(subscriptionManager: reader, ndb: delegate.ndb)
|
||||
}
|
||||
|
||||
// MARK: - Control and lifecycle functions
|
||||
|
||||
/// Connects the app to the Nostr network
|
||||
func connect() async {
|
||||
await self.userRelayList.connect() // Will load the user's list, apply it, and get RelayPool to connect to it.
|
||||
await self.profilesManager.load()
|
||||
}
|
||||
|
||||
func disconnectRelays() async {
|
||||
await self.pool.disconnect()
|
||||
}
|
||||
|
||||
func handleAppBackgroundRequest() async {
|
||||
await self.reader.cancelAllTasks()
|
||||
await self.pool.cleanQueuedRequestForSessionEnd()
|
||||
}
|
||||
|
||||
func handleAppForegroundRequest() async {
|
||||
// Pinging the network will automatically reconnect any dead websocket connections
|
||||
await self.ping()
|
||||
}
|
||||
|
||||
func close() async {
|
||||
await withTaskGroup { group in
|
||||
// Spawn each cancellation task in parallel for faster execution speed
|
||||
group.addTask {
|
||||
await self.reader.cancelAllTasks()
|
||||
}
|
||||
group.addTask {
|
||||
await self.profilesManager.stop()
|
||||
}
|
||||
// But await on each one to prevent race conditions
|
||||
for await value in group { continue }
|
||||
await pool.close()
|
||||
}
|
||||
}
|
||||
|
||||
func ping() async {
|
||||
await self.pool.ping()
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func relaysForEvent(event: NostrEvent) async -> [RelayURL] {
|
||||
// TODO(tyiu) Ideally this list would be sorted by the event author's outbox relay preferences
|
||||
// and reliability of relays to maximize chances of others finding this event.
|
||||
if let relays = await pool.seen[event.id] {
|
||||
return Array(relays)
|
||||
}
|
||||
|
||||
return []
|
||||
}
|
||||
|
||||
// TODO: ORGANIZE THESE
|
||||
|
||||
// MARK: - Communication with the Nostr Network
|
||||
/// ## Implementation notes
|
||||
///
|
||||
/// - This class hides the relay pool on purpose to avoid other code from dealing with complex relay + nostrDB logic.
|
||||
/// - Instead, we provide an easy-to-use interface so that normal code can just get the info it wants.
|
||||
/// - This is also to help us migrate to the relay model.
|
||||
// TODO: Define a better interface. This is a temporary scaffold to replace direct relay pool access. After that is done, we can refactor this interface to be cleaner and reduce nonsense.
|
||||
|
||||
func sendToNostrDB(event: NostrEvent) async {
|
||||
await self.pool.send_raw_to_local_ndb(.typical(.event(event)))
|
||||
}
|
||||
|
||||
func send(event: NostrEvent, to targetRelays: [RelayURL]? = nil, skipEphemeralRelays: Bool = true) async {
|
||||
await self.pool.send(.event(event), to: targetRelays, skip_ephemeral: skipEphemeralRelays)
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func getRelay(_ id: RelayURL) -> RelayPool.Relay? {
|
||||
pool.get_relay(id)
|
||||
}
|
||||
|
||||
@MainActor
|
||||
var connectedRelays: [RelayPool.Relay] {
|
||||
self.pool.relays
|
||||
}
|
||||
|
||||
@MainActor
|
||||
var ourRelayDescriptors: [RelayPool.RelayDescriptor] {
|
||||
self.pool.our_descriptors
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func relayURLsThatSawNote(id: NoteId) async -> Set<RelayURL>? {
|
||||
return await self.pool.seen[id]
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func determineToRelays(filters: RelayFilters) -> [RelayURL] {
|
||||
return self.pool.our_descriptors
|
||||
.map { $0.url }
|
||||
.filter { !filters.is_filtered(timeline: .search, relay_id: $0) }
|
||||
}
|
||||
|
||||
// MARK: NWC
|
||||
// TODO: Move this to NWCManager
|
||||
|
||||
@discardableResult
|
||||
func nwcPay(url: WalletConnectURL, post: PostBox, invoice: String, delay: TimeInterval? = 5.0, on_flush: OnFlush? = nil, zap_request: NostrEvent? = nil) async -> NostrEvent? {
|
||||
await WalletConnect.pay(url: url, pool: self.pool, post: post, invoice: invoice, zap_request: nil)
|
||||
}
|
||||
|
||||
/// Send a donation zap to the Damus team
|
||||
func send_donation_zap(nwc: WalletConnectURL, percent: Int, base_msats: Int64) async {
|
||||
let percent_f = Double(percent) / 100.0
|
||||
let donations_msats = Int64(percent_f * Double(base_msats))
|
||||
|
||||
let payreq = LNUrlPayRequest(allowsNostr: true, commentAllowed: nil, nostrPubkey: "", callback: "https://sendsats.lol/@damus")
|
||||
guard let invoice = await fetch_zap_invoice(payreq, zapreq: nil, msats: donations_msats, zap_type: .non_zap, comment: nil) else {
|
||||
// we failed... oh well. no donation for us.
|
||||
print("damus-donation failed to fetch invoice")
|
||||
return
|
||||
}
|
||||
|
||||
print("damus-donation donating...")
|
||||
await WalletConnect.pay(url: nwc, pool: self.pool, post: self.postbox, invoice: invoice, zap_request: nil, delay: nil)
|
||||
}
|
||||
}
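// Worked example (a sketch, not part of the file above) of the donation arithmetic used by
// `send_donation_zap`: a 5% donation on a 100_000 msat zap. Names are illustrative only.
let donationPercent = 5
let zapBaseMsats: Int64 = 100_000
let donationMsats = Int64(Double(donationPercent) / 100.0 * Double(zapBaseMsats)) // == 5_000 msats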
|
||||
|
||||
|
||||
// MARK: - Helper types
|
||||
|
||||
extension NostrNetworkManager {
|
||||
/// The delegate that provides information and structure for the `NostrNetworkManager` to function.
|
||||
///
|
||||
/// ## Implementation notes
|
||||
///
|
||||
/// This is needed to prevent a circular reference between `DamusState` and `NostrNetworkManager`, and reduce coupling.
|
||||
protocol Delegate: Sendable {
|
||||
/// NostrDB instance, used with `RelayPool` to send events for ingestion.
|
||||
var ndb: Ndb { get }
|
||||
|
||||
/// The keypair to use for relay authentication and updating relay lists
|
||||
var keypair: Keypair { get }
|
||||
|
||||
/// The latest relay list event id hex
|
||||
var latestRelayListEventIdHex: String? { get set } // TODO: Update this once we have full NostrDB query support
|
||||
|
||||
/// The latest contact list `NostrEvent`
|
||||
///
|
||||
/// Note: Read-only access, because `NostrNetworkManager` does not manage contact lists.
|
||||
@MainActor
|
||||
var latestContactListEvent: NostrEvent? { get }
|
||||
|
||||
/// Default bootstrap relays to start with when a user relay list is not present
|
||||
var bootstrapRelays: [RelayURL] { get }
|
||||
|
||||
/// Whether the app is in developer mode
|
||||
var developerMode: Bool { get }
|
||||
|
||||
/// Whether the app has the experimental local relay model flag that streams data only from the local relay (ndb)
|
||||
var experimentalLocalRelayModelSupport: Bool { get }
|
||||
|
||||
/// The cache of relay model information
|
||||
var relayModelCache: RelayModelCache { get }
|
||||
|
||||
/// Relay filters
|
||||
var relayFilters: RelayFilters { get }
|
||||
|
||||
/// The user's connected NWC wallet
|
||||
var nwcWallet: WalletConnectURL? { get }
|
||||
}
|
||||
}
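// Sketch (not part of the file above) of the cancel-in-parallel-then-wait pattern that
// `close()` uses: each cancellation runs as its own child task, but the group is drained
// before returning so callers never race against still-running work. `shutdown(tasks:)`
// is an illustrative helper, not a Damus API.
func shutdown(tasks: [Task<Void, Never>]) async {
    await withTaskGroup(of: Void.self) { group in
        for task in tasks {
            group.addTask {
                task.cancel()          // request cancellation in parallel...
                await task.value       // ...and wait for the task to actually finish
            }
        }
        for await _ in group { }       // drain the group before returning
    }
}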
|
||||
225
damus/Core/Networking/NostrNetworkManager/ProfilesManager.swift
Normal file
@@ -0,0 +1,225 @@
|
||||
//
|
||||
// ProfilesManager.swift
|
||||
// damus
|
||||
//
|
||||
// Created by Daniel D’Aquino on 2025-09-19.
|
||||
//
|
||||
import Foundation
|
||||
|
||||
extension NostrNetworkManager {
|
||||
/// Efficiently manages getting profile metadata from the network and NostrDB without too many relay subscriptions
|
||||
///
|
||||
/// This is necessary because relays limit how many subscriptions a client can keep open at any given time.
|
||||
actor ProfilesManager {
|
||||
private var profileListenerTask: Task<Void, any Error>? = nil
|
||||
private var subscriptionSwitcherTask: Task<Void, any Error>? = nil
|
||||
private var subscriptionNeedsUpdate: Bool = false
|
||||
private let subscriptionManager: SubscriptionManager
|
||||
private let ndb: Ndb
|
||||
private var streams: [Pubkey: [UUID: ProfileStreamInfo]]
|
||||
|
||||
|
||||
// MARK: - Initialization and deinitialization
|
||||
|
||||
init(subscriptionManager: SubscriptionManager, ndb: Ndb) {
|
||||
self.subscriptionManager = subscriptionManager
|
||||
self.ndb = ndb
|
||||
self.streams = [:]
|
||||
}
|
||||
|
||||
deinit {
|
||||
self.subscriptionSwitcherTask?.cancel()
|
||||
self.profileListenerTask?.cancel()
|
||||
}
|
||||
|
||||
// MARK: - Task management
|
||||
|
||||
func load() {
|
||||
self.restartProfileListenerTask()
|
||||
self.subscriptionSwitcherTask?.cancel()
|
||||
self.subscriptionSwitcherTask = Task {
|
||||
while true {
|
||||
try await Task.sleep(for: .seconds(1))
|
||||
try Task.checkCancellation()
|
||||
if subscriptionNeedsUpdate {
|
||||
try Task.checkCancellation()
|
||||
self.restartProfileListenerTask()
|
||||
subscriptionNeedsUpdate = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func stop() async {
|
||||
await withTaskGroup { group in
|
||||
// Spawn each cancellation in parallel for better execution speed
|
||||
group.addTask {
|
||||
await self.subscriptionSwitcherTask?.cancel()
|
||||
try? await self.subscriptionSwitcherTask?.value
|
||||
}
|
||||
group.addTask {
|
||||
await self.profileListenerTask?.cancel()
|
||||
try? await self.profileListenerTask?.value
|
||||
}
|
||||
// But await for all of them to be done before returning to avoid race conditions
|
||||
for await value in group { continue }
|
||||
}
|
||||
}
|
||||
|
||||
private func restartProfileListenerTask() {
|
||||
self.profileListenerTask?.cancel()
|
||||
self.profileListenerTask = Task {
|
||||
try await self.listenToProfileChanges()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// MARK: - Listening and publishing of profile changes
|
||||
|
||||
private func listenToProfileChanges() async throws {
|
||||
let pubkeys = Array(streams.keys)
|
||||
guard pubkeys.count > 0 else { return }
|
||||
let profileFilter = NostrFilter(kinds: [.metadata], authors: pubkeys)
|
||||
try Task.checkCancellation()
|
||||
for await ndbLender in self.subscriptionManager.streamIndefinitely(filters: [profileFilter], streamMode: .ndbFirst(optimizeNetworkFilter: false)) {
|
||||
try Task.checkCancellation()
|
||||
try? ndbLender.borrow { ev in
|
||||
publishProfileUpdates(metadataEvent: ev)
|
||||
}
|
||||
try Task.checkCancellation()
|
||||
}
|
||||
}
|
||||
|
||||
private func publishProfileUpdates(metadataEvent: borrowing UnownedNdbNote) {
|
||||
let now = UInt64(Date.now.timeIntervalSince1970)
|
||||
try? ndb.write_profile_last_fetched(pubkey: metadataEvent.pubkey, fetched_at: now)
|
||||
|
||||
if let relevantStreams = streams[metadataEvent.pubkey] {
|
||||
// If we have the user metadata event in ndb, then we should have the profile record as well.
|
||||
guard let profile = try? ndb.lookup_profile_and_copy(metadataEvent.pubkey) else { return }
|
||||
for relevantStream in relevantStreams.values {
|
||||
relevantStream.continuation.yield(profile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Manually trigger profile updates for a given pubkey
|
||||
/// This is useful for local profile changes (e.g., nip05 validation, donation percentage updates)
|
||||
func notifyProfileUpdate(pubkey: Pubkey) {
|
||||
if let relevantStreams = streams[pubkey] {
|
||||
guard let profile = try? ndb.lookup_profile_and_copy(pubkey) else { return }
|
||||
for relevantStream in relevantStreams.values {
|
||||
relevantStream.continuation.yield(profile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// MARK: - Streaming interface
|
||||
|
||||
/// Streams profile updates for a single pubkey.
|
||||
///
|
||||
/// By default, the stream immediately yields the existing profile from NostrDB
|
||||
/// (if available), then continues yielding updates as they arrive from the network.
|
||||
///
|
||||
/// This immediate yield is essential for views that display profile data (names,
|
||||
/// pictures) because the subscription restart has a ~1 second delay. Without it,
|
||||
/// views would flash abbreviated pubkeys or robohash placeholders.
|
||||
///
|
||||
/// Set `yieldCached: false` for subscribers that only need network updates (e.g.,
|
||||
/// re-rendering content when profiles change) and already handle initial state
|
||||
/// through other means.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - pubkey: The pubkey to stream profile updates for
|
||||
/// - yieldCached: Whether to immediately yield the cached profile. Defaults to `true`.
|
||||
/// - Returns: An AsyncStream that yields Profile objects
|
||||
func streamProfile(pubkey: Pubkey, yieldCached: Bool = true) -> AsyncStream<ProfileStreamItem> {
|
||||
return AsyncStream<ProfileStreamItem> { continuation in
|
||||
let stream = ProfileStreamInfo(continuation: continuation)
|
||||
self.add(pubkey: pubkey, stream: stream)
|
||||
|
||||
// Yield cached profile immediately so views don't flash placeholder content.
|
||||
// Callers that only need updates (not initial state) can opt out via yieldCached: false.
|
||||
if yieldCached, let existingProfile = try? ndb.lookup_profile_and_copy(pubkey) {
|
||||
continuation.yield(existingProfile)
|
||||
}
|
||||
|
||||
continuation.onTermination = { @Sendable _ in
|
||||
Task { await self.removeStream(pubkey: pubkey, id: stream.id) }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Streams profile updates for multiple pubkeys.
|
||||
///
|
||||
/// Same behavior as `streamProfile(_:yieldCached:)` but for a set of pubkeys.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - pubkeys: The set of pubkeys to stream profile updates for
|
||||
/// - yieldCached: Whether to immediately yield cached profiles. Defaults to `true`.
|
||||
/// - Returns: An AsyncStream that yields Profile objects
|
||||
func streamProfiles(pubkeys: Set<Pubkey>, yieldCached: Bool = true) -> AsyncStream<ProfileStreamItem> {
|
||||
guard !pubkeys.isEmpty else {
|
||||
return AsyncStream<ProfileStreamItem> { continuation in
|
||||
continuation.finish()
|
||||
}
|
||||
}
|
||||
|
||||
return AsyncStream<ProfileStreamItem> { continuation in
|
||||
let stream = ProfileStreamInfo(continuation: continuation)
|
||||
for pubkey in pubkeys {
|
||||
self.add(pubkey: pubkey, stream: stream)
|
||||
}
|
||||
|
||||
// Yield cached profiles immediately so views render correctly from the start.
|
||||
// Callers that only need updates (not initial state) can opt out via yieldCached: false.
|
||||
if yieldCached {
|
||||
for pubkey in pubkeys {
|
||||
if let existingProfile = try? ndb.lookup_profile_and_copy(pubkey) {
|
||||
continuation.yield(existingProfile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
continuation.onTermination = { @Sendable _ in
|
||||
Task {
|
||||
for pubkey in pubkeys {
|
||||
await self.removeStream(pubkey: pubkey, id: stream.id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// MARK: - Stream management
|
||||
|
||||
private func add(pubkey: Pubkey, stream: ProfileStreamInfo) {
|
||||
if self.streams[pubkey] == nil {
|
||||
self.streams[pubkey] = [:]
|
||||
self.subscriptionNeedsUpdate = true
|
||||
}
|
||||
self.streams[pubkey]?[stream.id] = stream
|
||||
}
|
||||
|
||||
func removeStream(pubkey: Pubkey, id: UUID) {
|
||||
self.streams[pubkey]?[id] = nil
|
||||
if self.streams[pubkey]?.keys.count == 0 {
|
||||
// We don't need to subscribe to this profile anymore
|
||||
self.streams[pubkey] = nil
|
||||
self.subscriptionNeedsUpdate = true
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// MARK: - Helper types
|
||||
|
||||
typealias ProfileStreamItem = Profile
|
||||
|
||||
struct ProfileStreamInfo {
|
||||
let id: UUID = UUID()
|
||||
let continuation: AsyncStream<ProfileStreamItem>.Continuation
|
||||
}
|
||||
}
|
||||
}
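// Sketch (not part of the file above) of the stream-registry pattern `ProfilesManager`
// uses: each subscriber gets an `AsyncStream`, a cached value is yielded immediately
// (like `yieldCached: true`), and the continuation is unregistered when the consumer
// stops listening. `Registry` and `Value` are illustrative stand-ins, not Damus types.
import Foundation

actor Registry<Value: Sendable> {
    private var continuations: [UUID: AsyncStream<Value>.Continuation] = [:]
    private var cached: Value?

    func stream() -> AsyncStream<Value> {
        AsyncStream { continuation in
            let id = UUID()
            Task { await self.register(id: id, continuation: continuation) }
            continuation.onTermination = { @Sendable _ in
                Task { await self.unregister(id: id) }
            }
        }
    }

    func publish(_ value: Value) {
        cached = value
        for continuation in continuations.values { continuation.yield(value) }
    }

    private func register(id: UUID, continuation: AsyncStream<Value>.Continuation) {
        continuations[id] = continuation
        if let cached { continuation.yield(cached) } // immediate yield, so consumers never start empty
    }

    private func unregister(id: UUID) {
        continuations[id] = nil
    }
}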
|
||||
@@ -0,0 +1,564 @@
|
||||
//
|
||||
// SubscriptionManager.swift
|
||||
// damus
|
||||
//
|
||||
// Created by Daniel D’Aquino on 2025-03-25.
|
||||
//
|
||||
import Foundation
|
||||
import os
|
||||
|
||||
|
||||
extension NostrNetworkManager {
|
||||
/// Reads or fetches information from RelayPool and NostrDB, and provides an easier and unified higher-level interface.
|
||||
///
|
||||
/// ## Implementation notes
|
||||
///
|
||||
/// - This class will be a key part of the local relay model migration. Most higher-level code should fetch content from this class, which will properly set up the correct relay pool subscriptions and provide a stream from NostrDB for higher performance and reliability.
|
||||
class SubscriptionManager {
|
||||
private let pool: RelayPool
|
||||
private var ndb: Ndb
|
||||
private var taskManager: TaskManager
|
||||
private let experimentalLocalRelayModelSupport: Bool
|
||||
|
||||
private static let logger = Logger(
|
||||
subsystem: Constants.MAIN_APP_BUNDLE_IDENTIFIER,
|
||||
category: "subscription_manager"
|
||||
)
|
||||
|
||||
init(pool: RelayPool, ndb: Ndb, experimentalLocalRelayModelSupport: Bool) {
|
||||
self.pool = pool
|
||||
self.ndb = ndb
|
||||
self.taskManager = TaskManager()
|
||||
self.experimentalLocalRelayModelSupport = experimentalLocalRelayModelSupport
|
||||
}
|
||||
|
||||
// MARK: - Subscribing and Streaming data from Nostr
|
||||
|
||||
/// Streams notes until the EOSE signal
|
||||
func streamExistingEvents(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, timeout: Duration? = nil, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
||||
let timeout = timeout ?? .seconds(10)
|
||||
return AsyncStream<NdbNoteLender> { continuation in
|
||||
let streamingTask = Task {
|
||||
outerLoop: for await item in self.advancedStream(filters: filters, to: desiredRelays, timeout: timeout, streamMode: streamMode, id: id) {
|
||||
try Task.checkCancellation()
|
||||
switch item {
|
||||
case .event(let lender):
|
||||
continuation.yield(lender)
|
||||
case .eose:
|
||||
break outerLoop
|
||||
case .ndbEose:
|
||||
continue
|
||||
case .networkEose:
|
||||
continue
|
||||
}
|
||||
}
|
||||
continuation.finish()
|
||||
}
|
||||
continuation.onTermination = { @Sendable _ in
|
||||
streamingTask.cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Subscribes to data from the user's relays for a maximum period of time, after which the stream ends.
|
||||
///
|
||||
/// This is useful when waiting for some specific data from Nostr, but not indefinitely.
|
||||
func timedStream(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, timeout: Duration, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
||||
return AsyncStream<NdbNoteLender> { continuation in
|
||||
let streamingTask = Task {
|
||||
for await item in self.advancedStream(filters: filters, to: desiredRelays, timeout: timeout, streamMode: streamMode, id: id) {
|
||||
try Task.checkCancellation()
|
||||
switch item {
|
||||
case .event(lender: let lender):
|
||||
continuation.yield(lender)
|
||||
case .eose: break
|
||||
case .ndbEose: break
|
||||
case .networkEose: break
|
||||
}
|
||||
}
|
||||
continuation.finish()
|
||||
}
|
||||
continuation.onTermination = { @Sendable _ in
|
||||
streamingTask.cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Subscribes to notes indefinitely
|
||||
///
|
||||
/// This is useful when simply streaming all events indefinitely
|
||||
func streamIndefinitely(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<NdbNoteLender> {
|
||||
return AsyncStream<NdbNoteLender> { continuation in
|
||||
let streamingTask = Task {
|
||||
for await item in self.advancedStream(filters: filters, to: desiredRelays, streamMode: streamMode, id: id) {
|
||||
try Task.checkCancellation()
|
||||
switch item {
|
||||
case .event(lender: let lender):
|
||||
continuation.yield(lender)
|
||||
case .eose:
|
||||
break
|
||||
case .ndbEose:
|
||||
break
|
||||
case .networkEose:
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
continuation.onTermination = { @Sendable _ in
|
||||
streamingTask.cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func advancedStream(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, timeout: Duration? = nil, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<StreamItem> {
|
||||
let id = id ?? UUID()
|
||||
let streamMode = streamMode ?? defaultStreamMode()
|
||||
return AsyncStream<StreamItem> { continuation in
|
||||
let startTime = CFAbsoluteTimeGetCurrent()
|
||||
Self.logger.debug("Session subscription \(id.uuidString, privacy: .public): Started")
|
||||
var ndbEOSEIssued = false
|
||||
var networkEOSEIssued = false
|
||||
|
||||
// This closure function issues (yields) an EOSE signal to the stream if all relevant conditions are met
|
||||
let yieldEOSEIfReady = {
|
||||
let connectedToNetwork = self.pool.network_monitor.currentPath.status == .satisfied
|
||||
// In normal mode: Issuing EOSE requires an EOSE from both NDB and the network, since each is treated as a separate relay
|
||||
// In experimental local relay model mode: Issuing EOSE requires only EOSE from NDB, since that is the only relay that "matters"
|
||||
let canIssueEOSE = switch streamMode {
|
||||
case .ndbFirst, .ndbOnly: (ndbEOSEIssued)
|
||||
case .ndbAndNetworkParallel: (ndbEOSEIssued && (networkEOSEIssued || !connectedToNetwork))
|
||||
}
|
||||
|
||||
if canIssueEOSE {
|
||||
Self.logger.debug("Session subscription \(id.uuidString, privacy: .public): Issued EOSE for session. Elapsed: \(CFAbsoluteTimeGetCurrent() - startTime, format: .fixed(precision: 2), privacy: .public) seconds")
|
||||
logStreamPipelineStats("SubscriptionManager_Advanced_Stream_\(id)", "Consumer_\(id)")
|
||||
continuation.yield(.eose)
|
||||
}
|
||||
}
|
||||
|
||||
var networkStreamTask: Task<Void, any Error>? = nil
|
||||
var latestNoteTimestampSeen: UInt32? = nil
|
||||
|
||||
let startNetworkStreamTask = {
|
||||
guard streamMode.shouldStreamFromNetwork else { return }
|
||||
networkStreamTask = Task {
|
||||
while !Task.isCancelled {
|
||||
let optimizedFilters = filters.map {
|
||||
var optimizedFilter = $0
|
||||
// Shift the since filter 2 minutes (120 seconds) before the last note timestamp
|
||||
if let latestTimestamp = latestNoteTimestampSeen {
|
||||
optimizedFilter.since = latestTimestamp > 120 ? latestTimestamp - 120 : 0
|
||||
}
|
||||
return optimizedFilter
|
||||
}
|
||||
for await item in self.multiSessionNetworkStream(filters: optimizedFilters, to: desiredRelays, streamMode: streamMode, id: id) {
|
||||
try Task.checkCancellation()
|
||||
logStreamPipelineStats("SubscriptionManager_Network_Stream_\(id)", "SubscriptionManager_Advanced_Stream_\(id)")
|
||||
switch item {
|
||||
case .event(let lender):
|
||||
logStreamPipelineStats("SubscriptionManager_Advanced_Stream_\(id)", "Consumer_\(id)")
|
||||
continuation.yield(item)
|
||||
case .eose:
|
||||
break // Should not happen
|
||||
case .ndbEose:
|
||||
break // Should not happen
|
||||
case .networkEose:
|
||||
logStreamPipelineStats("SubscriptionManager_Advanced_Stream_\(id)", "Consumer_\(id)")
|
||||
continuation.yield(item)
|
||||
networkEOSEIssued = true
|
||||
yieldEOSEIfReady()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if streamMode.optimizeNetworkFilter == false && streamMode.shouldStreamFromNetwork {
|
||||
// Start streaming from the network straight away
|
||||
startNetworkStreamTask()
|
||||
}
|
||||
|
||||
let ndbStreamTask = Task {
|
||||
while !Task.isCancelled {
|
||||
for await item in self.multiSessionNdbStream(filters: filters, to: desiredRelays, streamMode: streamMode, id: id) {
|
||||
try Task.checkCancellation()
|
||||
logStreamPipelineStats("SubscriptionManager_Ndb_MultiSession_Stream_\(id)", "SubscriptionManager_Advanced_Stream_\(id)")
|
||||
switch item {
|
||||
case .event(let lender):
|
||||
logStreamPipelineStats("SubscriptionManager_Advanced_Stream_\(id)", "Consumer_\(id)")
|
||||
try? lender.borrow({ event in
|
||||
if let latestTimestamp = latestNoteTimestampSeen {
|
||||
latestNoteTimestampSeen = max(latestTimestamp, event.createdAt)
|
||||
}
|
||||
else {
|
||||
latestNoteTimestampSeen = event.createdAt
|
||||
}
|
||||
})
|
||||
continuation.yield(item)
|
||||
case .eose:
|
||||
break // Should not happen
|
||||
case .ndbEose:
|
||||
logStreamPipelineStats("SubscriptionManager_Advanced_Stream_\(id)", "Consumer_\(id)")
|
||||
continuation.yield(item)
|
||||
ndbEOSEIssued = true
|
||||
if streamMode.optimizeNetworkFilter && streamMode.shouldStreamFromNetwork {
|
||||
startNetworkStreamTask()
|
||||
}
|
||||
yieldEOSEIfReady()
|
||||
case .networkEose:
|
||||
break // Should not happen
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
continuation.onTermination = { @Sendable _ in
|
||||
networkStreamTask?.cancel()
|
||||
ndbStreamTask.cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func multiSessionNetworkStream(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<StreamItem> {
|
||||
let id = id ?? UUID()
|
||||
let streamMode = streamMode ?? defaultStreamMode()
|
||||
return AsyncStream<StreamItem> { continuation in
|
||||
let startTime = CFAbsoluteTimeGetCurrent()
|
||||
Self.logger.debug("Network subscription \(id.uuidString, privacy: .public): Started")
|
||||
|
||||
let streamTask = Task {
|
||||
while await !self.pool.open {
|
||||
Self.logger.info("\(id.uuidString, privacy: .public): RelayPool closed. Sleeping for 1 second before resuming.")
|
||||
try await Task.sleep(nanoseconds: 1_000_000_000)
|
||||
continue
|
||||
}
|
||||
|
||||
do {
|
||||
for await item in await self.pool.subscribe(filters: filters, to: desiredRelays, id: id) {
|
||||
try Task.checkCancellation()
|
||||
logStreamPipelineStats("RelayPool_Handler_\(id)", "SubscriptionManager_Network_Stream_\(id)")
|
||||
switch item {
|
||||
case .event(let event):
|
||||
switch streamMode {
|
||||
case .ndbFirst, .ndbOnly:
|
||||
break // NO-OP
|
||||
case .ndbAndNetworkParallel:
|
||||
continuation.yield(.event(lender: NdbNoteLender(ownedNdbNote: event)))
|
||||
}
|
||||
case .eose:
|
||||
Self.logger.debug("Session subscription \(id.uuidString, privacy: .public): Received EOSE from the network. Elapsed: \(CFAbsoluteTimeGetCurrent() - startTime, format: .fixed(precision: 2), privacy: .public) seconds")
|
||||
continuation.yield(.networkEose)
|
||||
}
|
||||
}
|
||||
}
|
||||
catch {
|
||||
Self.logger.error("Network subscription \(id.uuidString, privacy: .public): Streaming error: \(error.localizedDescription, privacy: .public)")
|
||||
}
|
||||
Self.logger.debug("Network subscription \(id.uuidString, privacy: .public): Network streaming ended")
|
||||
continuation.finish()
|
||||
}
|
||||
|
||||
continuation.onTermination = { @Sendable _ in
|
||||
streamTask.cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func multiSessionNdbStream(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<StreamItem> {
|
||||
return AsyncStream<StreamItem> { continuation in
|
||||
let subscriptionId = id ?? UUID()
|
||||
let startTime = CFAbsoluteTimeGetCurrent()
|
||||
Self.logger.info("Starting multi-session NDB subscription \(subscriptionId.uuidString, privacy: .public): \(filters.debugDescription, privacy: .private)")
|
||||
let multiSessionStreamingTask = Task {
|
||||
while !Task.isCancelled {
|
||||
do {
|
||||
guard !self.ndb.is_closed else {
|
||||
Self.logger.info("\(subscriptionId.uuidString, privacy: .public): Ndb closed. Sleeping for 1 second before resuming.")
|
||||
try await Task.sleep(nanoseconds: 1_000_000_000)
|
||||
continue
|
||||
}
|
||||
Self.logger.info("\(subscriptionId.uuidString, privacy: .public): Streaming from NDB.")
|
||||
for await item in self.sessionNdbStream(filters: filters, to: desiredRelays, streamMode: streamMode, id: id) {
|
||||
try Task.checkCancellation()
|
||||
logStreamPipelineStats("SubscriptionManager_Ndb_Session_Stream_\(id?.uuidString ?? "NoID")", "SubscriptionManager_Ndb_MultiSession_Stream_\(id?.uuidString ?? "NoID")")
|
||||
continuation.yield(item)
|
||||
}
|
||||
Self.logger.info("\(subscriptionId.uuidString, privacy: .public): Session subscription ended. Sleeping for 1 second before resuming.")
|
||||
try await Task.sleep(nanoseconds: 1_000_000_000)
|
||||
}
|
||||
catch {
|
||||
Self.logger.error("Session subscription \(subscriptionId.uuidString, privacy: .public): Error: \(error.localizedDescription, privacy: .public)")
|
||||
}
|
||||
}
|
||||
Self.logger.info("\(subscriptionId.uuidString, privacy: .public): Terminated.")
|
||||
}
|
||||
continuation.onTermination = { @Sendable _ in
|
||||
Self.logger.info("\(subscriptionId.uuidString, privacy: .public): Cancelled multi-session NDB stream.")
|
||||
multiSessionStreamingTask.cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func sessionNdbStream(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, streamMode: StreamMode? = nil, id: UUID? = nil) -> AsyncStream<StreamItem> {
|
||||
let id = id ?? UUID()
|
||||
//let streamMode = streamMode ?? defaultStreamMode()
|
||||
return AsyncStream<StreamItem> { continuation in
|
||||
let startTime = CFAbsoluteTimeGetCurrent()
|
||||
Self.logger.debug("Session subscription \(id.uuidString, privacy: .public): Started")
|
||||
|
||||
let ndbStreamTask = Task {
|
||||
do {
|
||||
for await item in try self.ndb.subscribe(filters: try filters.map({ try NdbFilter(from: $0) })) {
|
||||
try Task.checkCancellation()
|
||||
switch item {
|
||||
case .eose:
|
||||
Self.logger.debug("Session subscription \(id.uuidString, privacy: .public): Received EOSE from nostrdb. Elapsed: \(CFAbsoluteTimeGetCurrent() - startTime, format: .fixed(precision: 2), privacy: .public) seconds")
|
||||
continuation.yield(.ndbEose)
|
||||
case .event(let noteKey):
|
||||
let lender = NdbNoteLender(ndb: self.ndb, noteKey: noteKey)
|
||||
try Task.checkCancellation()
|
||||
guard let desiredRelays else {
|
||||
continuation.yield(.event(lender: lender)) // If no desired relays are specified, return all notes we see.
|
||||
break
|
||||
}
|
||||
if try ndb.was(noteKey: noteKey, seenOnAnyOf: desiredRelays) {
|
||||
continuation.yield(.event(lender: lender)) // If desired relays were specified and this note was seen there, return it.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch {
|
||||
Self.logger.error("Session subscription \(id.uuidString, privacy: .public): NDB streaming error: \(error.localizedDescription, privacy: .public)")
|
||||
}
|
||||
Self.logger.debug("Session subscription \(id.uuidString, privacy: .public): NDB streaming ended")
|
||||
continuation.finish()
|
||||
}
|
||||
|
||||
Task {
|
||||
// Add the ndb streaming task to the task manager so that it can be cancelled when the app is backgrounded
|
||||
let ndbStreamTaskId = await self.taskManager.add(task: ndbStreamTask)
|
||||
|
||||
continuation.onTermination = { @Sendable _ in
|
||||
Task {
|
||||
await self.taskManager.cancelAndCleanUp(taskId: ndbStreamTaskId)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Utility functions
|
||||
|
||||
private func defaultStreamMode() -> StreamMode {
|
||||
self.experimentalLocalRelayModelSupport ? .ndbFirst(optimizeNetworkFilter: false) : .ndbAndNetworkParallel(optimizeNetworkFilter: false)
|
||||
}
|
||||
|
||||
// MARK: - Finding specific data from Nostr
|
||||
|
||||
/// Finds a non-replaceable event based on a note ID
|
||||
func lookup(noteId: NoteId, to targetRelays: [RelayURL]? = nil, timeout: Duration? = nil) async throws -> NdbNoteLender? {
|
||||
let filter = NostrFilter(ids: [noteId], limit: 1)
|
||||
|
||||
// Since note ids point to immutable objects, we can do a simple ndb lookup first
|
||||
if let noteKey = try? self.ndb.lookup_note_key(noteId) {
|
||||
return NdbNoteLender(ndb: self.ndb, noteKey: noteKey)
|
||||
}
|
||||
|
||||
// Not available in local ndb, stream from network
|
||||
outerLoop: for await item in await self.pool.subscribe(filters: [NostrFilter(ids: [noteId], limit: 1)], to: targetRelays, eoseTimeout: timeout) {
|
||||
switch item {
|
||||
case .event(let event):
|
||||
return NdbNoteLender(ownedNdbNote: event)
|
||||
case .eose:
|
||||
break outerLoop
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func query(filters: [NostrFilter], to: [RelayURL]? = nil, timeout: Duration? = nil) async -> [NostrEvent] {
|
||||
var events: [NostrEvent] = []
|
||||
for await noteLender in self.streamExistingEvents(filters: filters, to: to, timeout: timeout) {
|
||||
noteLender.justUseACopy({ events.append($0) })
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
/// Finds a replaceable event based on an `naddr` address.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - naddr: the `naddr` address
|
||||
func lookup(naddr: NAddr, to targetRelays: [RelayURL]? = nil, timeout: Duration? = nil) async -> NostrEvent? {
|
||||
var nostrKinds: [NostrKind]? = NostrKind(rawValue: naddr.kind).map { [$0] }
|
||||
|
||||
let filter = NostrFilter(kinds: nostrKinds, authors: [naddr.author])
|
||||
|
||||
for await noteLender in self.streamExistingEvents(filters: [filter], to: targetRelays, timeout: timeout) {
|
||||
// TODO: This can be refactored to borrow the note instead of copying it. But we need to implement `referenced_params` on `UnownedNdbNote` to do so
|
||||
guard let event = noteLender.justGetACopy() else { continue }
|
||||
if event.referenced_params.first?.param.string() == naddr.identifier {
|
||||
return event
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: Improve this. This is mostly kept intact for compatibility with its predecessor, but we can do better
|
||||
func findEvent(query: FindEvent) async -> FoundEvent? {
|
||||
var filter: NostrFilter? = nil
|
||||
let find_from = query.find_from
|
||||
let query = query.type
|
||||
|
||||
switch query {
|
||||
case .profile(let pubkey):
|
||||
let profileNotNil = try? self.ndb.lookup_profile(pubkey, borrow: { pr in
|
||||
switch pr {
|
||||
case .some(let pr): return pr.profile != nil
|
||||
case .none: return true
|
||||
}
|
||||
})
|
||||
if profileNotNil ?? false {
|
||||
return .profile(pubkey)
|
||||
}
|
||||
filter = NostrFilter(kinds: [.metadata], limit: 1, authors: [pubkey])
|
||||
case .event(let evid):
|
||||
if let event = try? self.ndb.lookup_note_and_copy(evid) {
|
||||
return .event(event)
|
||||
}
|
||||
filter = NostrFilter(ids: [evid], limit: 1)
|
||||
}
|
||||
|
||||
var attempts: Int = 0
|
||||
var has_event = false
|
||||
guard let filter else { return nil }
|
||||
|
||||
for await noteLender in self.streamExistingEvents(filters: [filter], to: find_from) {
|
||||
let foundEvent: FoundEvent? = try? noteLender.borrow({ event in
|
||||
switch query {
|
||||
case .profile:
|
||||
if event.known_kind == .metadata {
|
||||
return .profile(event.pubkey)
|
||||
}
|
||||
case .event:
|
||||
return .event(event.toOwned())
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if let foundEvent {
|
||||
return foundEvent
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MARK: - Task management
|
||||
|
||||
func cancelAllTasks() async {
|
||||
await self.taskManager.cancelAllTasks()
|
||||
}
|
||||
|
||||
actor TaskManager {
|
||||
private var tasks: [UUID: Task<Void, Never>] = [:]
|
||||
|
||||
private static let logger = Logger(
|
||||
subsystem: "com.jb55.damus",
|
||||
category: "subscription_manager.task_manager"
|
||||
)
|
||||
|
||||
func add(task: Task<Void, Never>) -> UUID {
|
||||
let taskId = UUID()
|
||||
self.tasks[taskId] = task
|
||||
return taskId
|
||||
}
|
||||
|
||||
func cancelAndCleanUp(taskId: UUID) async {
|
||||
self.tasks[taskId]?.cancel()
|
||||
await self.tasks[taskId]?.value
|
||||
self.tasks[taskId] = nil
|
||||
return
|
||||
}
|
||||
|
||||
func cancelAllTasks() async {
|
||||
await withTaskGroup { group in
|
||||
Self.logger.info("Cancelling all SubscriptionManager tasks")
|
||||
// Start each task cancellation in parallel for faster execution
|
||||
for (taskId, _) in self.tasks {
|
||||
Self.logger.info("Cancelling SubscriptionManager task \(taskId.uuidString, privacy: .public)")
|
||||
group.addTask {
|
||||
await self.cancelAndCleanUp(taskId: taskId)
|
||||
}
|
||||
}
|
||||
// However, wait until all cancellations are complete to avoid race conditions.
|
||||
for await value in group {
|
||||
continue
|
||||
}
|
||||
Self.logger.info("Cancelled all SubscriptionManager tasks")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum StreamItem {
|
||||
/// An event which can be borrowed from NostrDB
|
||||
case event(lender: NdbNoteLender)
|
||||
/// The canonical, generic "end of stored events" signal, whose timing depends on the stream mode. See `StreamMode` for when this is fired relative to the other EOSE cases
|
||||
case eose
|
||||
/// "End of stored events" from NostrDB.
|
||||
case ndbEose
|
||||
/// "End of stored events" from all relays in `RelayPool`.
|
||||
case networkEose
|
||||
|
||||
var debugDescription: String {
|
||||
switch self {
|
||||
case .event(lender: let lender):
|
||||
let detailedDescription = try? lender.borrow({ event in
|
||||
"Note with ID: \(event.id.hex())"
|
||||
})
|
||||
return detailedDescription ?? "Some note"
|
||||
case .eose:
|
||||
return "EOSE"
|
||||
case .ndbEose:
|
||||
return "NDB EOSE"
|
||||
case .networkEose:
|
||||
return "NETWORK EOSE"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The mode of streaming
|
||||
enum StreamMode {
|
||||
/// Returns notes exclusively through NostrDB, treating it as the only channel for information in the pipeline. Generic EOSE is fired when EOSE is received from NostrDB
|
||||
/// `optimizeNetworkFilter`: Returns notes from ndb, then streams from the network with an added "since" filter set to the latest note stored on ndb.
|
||||
case ndbFirst(optimizeNetworkFilter: Bool)
|
||||
/// Returns notes from both NostrDB and the network in parallel, treating NostrDB with the same importance as the network relays. Generic EOSE is fired when EOSE has been received from both the network and NostrDB
|
||||
/// `optimizeNetworkFilter`: Returns notes from ndb, then streams from the network with an added "since" filter set to the latest note stored on ndb.
|
||||
case ndbAndNetworkParallel(optimizeNetworkFilter: Bool)
|
||||
/// Ignores the network.
|
||||
case ndbOnly
|
||||
|
||||
var optimizeNetworkFilter: Bool {
|
||||
switch self {
|
||||
case .ndbFirst(optimizeNetworkFilter: let optimizeNetworkFilter):
|
||||
return optimizeNetworkFilter
|
||||
case .ndbAndNetworkParallel(optimizeNetworkFilter: let optimizeNetworkFilter):
|
||||
return optimizeNetworkFilter
|
||||
case .ndbOnly:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
var shouldStreamFromNetwork: Bool {
|
||||
switch self {
|
||||
case .ndbFirst:
|
||||
return true
|
||||
case .ndbAndNetworkParallel:
|
||||
return true
|
||||
case .ndbOnly:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
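// Sketch (not part of the file above) of the EOSE gating rule implemented by
// `yieldEOSEIfReady` in `advancedStream`, extracted as a pure function. `Mode` is a
// simplified stand-in for `StreamMode` (the associated values are dropped).
enum Mode { case ndbFirst, ndbOnly, ndbAndNetworkParallel }

func canIssueEOSE(mode: Mode, ndbEOSE: Bool, networkEOSE: Bool, online: Bool) -> Bool {
    switch mode {
    case .ndbFirst, .ndbOnly:
        // NostrDB is the only source that matters: its EOSE is enough.
        return ndbEOSE
    case .ndbAndNetworkParallel:
        // Need EOSE from both sides, unless we are offline and a network EOSE will never come.
        return ndbEOSE && (networkEOSE || !online)
    }
}

// e.g. offline device in parallel mode: the NDB EOSE alone ends the "stored events" phase.
// canIssueEOSE(mode: .ndbAndNetworkParallel, ndbEOSE: true, networkEOSE: false, online: false) == true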
|
||||
@@ -30,6 +30,7 @@ extension NostrNetworkManager {
|
||||
|
||||
// MARK: - Computing the relays to connect to
|
||||
|
||||
@MainActor
|
||||
private func relaysToConnectTo() -> [RelayPool.RelayDescriptor] {
|
||||
return self.computeRelaysToConnectTo(with: self.getBestEffortRelayList())
|
||||
}
|
||||
@@ -49,6 +50,7 @@ extension NostrNetworkManager {
|
||||
/// It attempts to get a relay list from the user. If one is not available, it uses the default bootstrap list.
|
||||
///
|
||||
/// This is always guaranteed to return a relay list.
|
||||
@MainActor
|
||||
func getBestEffortRelayList() -> NIP65.RelayList {
|
||||
guard let userCurrentRelayList = self.getUserCurrentRelayList() else {
|
||||
return NIP65.RelayList(relays: delegate.bootstrapRelays)
|
||||
@@ -59,6 +61,7 @@ extension NostrNetworkManager {
|
||||
/// Gets the user's current relay list.
|
||||
///
|
||||
/// It attempts to get a NIP-65 relay list from the local database, or falls back to a legacy list.
|
||||
@MainActor
|
||||
func getUserCurrentRelayList() -> NIP65.RelayList? {
|
||||
if let latestRelayListEvent = try? self.getLatestNIP65RelayList() { return latestRelayListEvent }
|
||||
if let latestRelayListEvent = try? self.getLatestKind3RelayList() { return latestRelayListEvent }
|
||||
@@ -87,12 +90,13 @@ extension NostrNetworkManager {
|
||||
private func getLatestNIP65RelayListEvent() -> NdbNote? {
|
||||
guard let latestRelayListEventId = delegate.latestRelayListEventIdHex else { return nil }
|
||||
guard let latestRelayListEventId = NoteId(hex: latestRelayListEventId) else { return nil }
|
||||
return delegate.ndb.lookup_note(latestRelayListEventId)?.unsafeUnownedValue?.to_owned()
|
||||
return try? delegate.ndb.lookup_note_and_copy(latestRelayListEventId)
|
||||
}
|
||||
|
||||
/// Gets the latest `kind:3` relay list from NostrDB.
|
||||
///
|
||||
/// This is `private` because it is part of internal logic. Callers should use the higher level functions.
|
||||
@MainActor
|
||||
private func getLatestKind3RelayList() throws(LoadingError) -> NIP65.RelayList? {
|
||||
guard let latestContactListEvent = delegate.latestContactListEvent else { return nil }
|
||||
guard let legacyContactList = try? NIP65.RelayList.fromLegacyContactList(latestContactListEvent) else { throw .relayListParseError }
|
||||
@@ -114,6 +118,7 @@ extension NostrNetworkManager {
|
||||
|
||||
/// Gets the creation date of the user's current relay list, with preference to NIP-65 relay lists
|
||||
/// - Returns: The current relay list's creation date
|
||||
@MainActor
|
||||
private func getUserCurrentRelayListCreationDate() -> UInt32? {
|
||||
if let latestNIP65RelayListEvent = self.getLatestNIP65RelayListEvent() { return latestNIP65RelayListEvent.created_at }
|
||||
if let latestKind3RelayListEvent = delegate.latestContactListEvent { return latestKind3RelayListEvent.created_at }
|
||||
@@ -122,72 +127,67 @@ extension NostrNetworkManager {
|
||||
|
||||
// MARK: - Listening to and handling relay updates from the network
|
||||
|
||||
func connect() {
|
||||
self.load()
|
||||
func connect() async {
|
||||
await self.load()
|
||||
|
||||
self.relayListObserverTask?.cancel()
|
||||
self.relayListObserverTask = Task { await self.listenAndHandleRelayUpdates() }
|
||||
self.walletUpdatesObserverTask?.cancel()
|
||||
self.walletUpdatesObserverTask = handle_notify(.attached_wallet).sink { _ in self.load() }
|
||||
self.walletUpdatesObserverTask = handle_notify(.attached_wallet).sink { _ in Task { await self.load() } }
|
||||
}
|
||||
|
||||
func listenAndHandleRelayUpdates() async {
|
||||
let filter = NostrFilter(kinds: [.relay_list], authors: [delegate.keypair.pubkey])
|
||||
for await item in self.reader.subscribe(filters: [filter]) {
|
||||
switch item {
|
||||
case .event(borrow: let borrow): // Signature validity already ensured at this point
|
||||
let currentRelayListCreationDate = self.getUserCurrentRelayListCreationDate()
|
||||
try? borrow { note in
|
||||
guard note.pubkey == self.delegate.keypair.pubkey else { return } // Ensure this new list was ours
|
||||
guard note.createdAt > (currentRelayListCreationDate ?? 0) else { return } // Ensure this is a newer list
|
||||
guard let relayList = try? NIP65.RelayList(event: note) else { return } // Ensure it is a valid NIP-65 list
|
||||
|
||||
try? self.set(userRelayList: relayList) // Set the validated list
|
||||
}
|
||||
case .eose: continue
|
||||
}
|
||||
for await noteLender in self.reader.streamIndefinitely(filters: [filter]) {
|
||||
let currentRelayListCreationDate = await self.getUserCurrentRelayListCreationDate()
|
||||
guard let note = noteLender.justGetACopy() else { continue }
|
||||
guard note.pubkey == self.delegate.keypair.pubkey else { continue } // Ensure this new list was ours
|
||||
guard note.created_at > (currentRelayListCreationDate ?? 0) else { continue } // Ensure this is a newer list
|
||||
guard let relayList = try? NIP65.RelayList(event: note) else { continue } // Ensure it is a valid NIP-65 list
|
||||
|
||||
try? await self.set(userRelayList: relayList) // Set the validated list
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Editing the user's relay list
|
||||
|
||||
func upsert(relay: NIP65.RelayList.RelayItem, force: Bool = false, overwriteExisting: Bool = false) throws(UpdateError) {
|
||||
guard let currentUserRelayList = force ? self.getBestEffortRelayList() : self.getUserCurrentRelayList() else { throw .noInitialRelayList }
|
||||
func upsert(relay: NIP65.RelayList.RelayItem, force: Bool = false, overwriteExisting: Bool = false) async throws(UpdateError) {
|
||||
guard let currentUserRelayList = await force ? self.getBestEffortRelayList() : self.getUserCurrentRelayList() else { throw .noInitialRelayList }
|
||||
guard !currentUserRelayList.relays.keys.contains(relay.url) || overwriteExisting else { throw .relayAlreadyExists }
|
||||
var newList = currentUserRelayList.relays
|
||||
newList[relay.url] = relay
|
||||
try self.set(userRelayList: NIP65.RelayList(relays: Array(newList.values)))
|
||||
try await self.set(userRelayList: NIP65.RelayList(relays: Array(newList.values)))
|
||||
}
|
||||
|
||||
func insert(relay: NIP65.RelayList.RelayItem, force: Bool = false) throws(UpdateError) {
|
||||
guard let currentUserRelayList = force ? self.getBestEffortRelayList() : self.getUserCurrentRelayList() else { throw .noInitialRelayList }
|
||||
func insert(relay: NIP65.RelayList.RelayItem, force: Bool = false) async throws(UpdateError) {
|
||||
guard let currentUserRelayList = await force ? self.getBestEffortRelayList() : self.getUserCurrentRelayList() else { throw .noInitialRelayList }
|
||||
guard currentUserRelayList.relays[relay.url] == nil else { throw .relayAlreadyExists }
|
||||
try self.upsert(relay: relay, force: force)
|
||||
try await self.upsert(relay: relay, force: force)
|
||||
}
|
||||
|
||||
func remove(relayURL: RelayURL, force: Bool = false) throws(UpdateError) {
|
||||
guard let currentUserRelayList = force ? self.getBestEffortRelayList() : self.getUserCurrentRelayList() else { throw .noInitialRelayList }
|
||||
func remove(relayURL: RelayURL, force: Bool = false) async throws(UpdateError) {
|
||||
guard let currentUserRelayList = await force ? self.getBestEffortRelayList() : self.getUserCurrentRelayList() else { throw .noInitialRelayList }
|
||||
guard currentUserRelayList.relays.keys.contains(relayURL) || force else { throw .noSuchRelay }
|
||||
var newList = currentUserRelayList.relays
|
||||
newList[relayURL] = nil
|
||||
try self.set(userRelayList: NIP65.RelayList(relays: Array(newList.values)))
|
||||
try await self.set(userRelayList: NIP65.RelayList(relays: Array(newList.values)))
|
||||
}
|
||||
|
||||
func set(userRelayList: NIP65.RelayList) throws(UpdateError) {
|
||||
func set(userRelayList: NIP65.RelayList) async throws(UpdateError) {
|
||||
guard let fullKeypair = delegate.keypair.to_full() else { throw .notAuthorizedToChangeRelayList }
|
||||
guard let relayListEvent = userRelayList.toNostrEvent(keypair: fullKeypair) else { throw .cannotFormRelayListEvent }
|
||||
|
||||
self.apply(newRelayList: self.computeRelaysToConnectTo(with: userRelayList))
|
||||
await self.apply(newRelayList: self.computeRelaysToConnectTo(with: userRelayList))
|
||||
|
||||
self.pool.send(.event(relayListEvent)) // This will send to NostrDB as well, which will locally save that NIP-65 event
|
||||
await self.pool.send(.event(relayListEvent)) // This will send to NostrDB as well, which will locally save that NIP-65 event
|
||||
self.delegate.latestRelayListEventIdHex = relayListEvent.id.hex() // Make sure we are able to recall this event from NostrDB
|
||||
}
|
||||
|
||||
// MARK: - Syncing our saved user relay list with the active `RelayPool`
|
||||
|
||||
/// Loads the current user relay list
|
||||
func load() {
|
||||
self.apply(newRelayList: self.relaysToConnectTo())
|
||||
func load() async {
|
||||
await self.apply(newRelayList: self.relaysToConnectTo())
|
||||
}
|
||||
|
||||
/// Loads a new relay list into the active relay pool, making sure it matches the specified relay list.
|
||||
@@ -201,7 +201,8 @@ extension NostrNetworkManager {
|
||||
///
|
||||
/// - This is `private` because syncing the user's saved relay list with the relay pool is `NostrNetworkManager`'s responsibility,
|
||||
/// so we do not want other classes to forcibly load this.
|
||||
private func apply(newRelayList: [RelayPool.RelayDescriptor]) {
|
||||
@MainActor
|
||||
private func apply(newRelayList: [RelayPool.RelayDescriptor]) async {
|
||||
let currentRelayList = self.pool.relays.map({ $0.descriptor })
|
||||
|
||||
var changed = false
|
||||
@@ -221,28 +222,39 @@ extension NostrNetworkManager {
|
||||
let relaysToRemove = currentRelayURLs.subtracting(newRelayURLs)
|
||||
let relaysToAdd = newRelayURLs.subtracting(currentRelayURLs)
|
||||
|
||||
// Remove relays not in the new list
|
||||
relaysToRemove.forEach { url in
|
||||
pool.remove_relay(url)
|
||||
changed = true
|
||||
}
|
||||
await withTaskGroup { taskGroup in
|
||||
// Remove relays not in the new list
|
||||
relaysToRemove.forEach { url in
|
||||
taskGroup.addTask(operation: { await self.pool.remove_relay(url) })
|
||||
changed = true
|
||||
}
|
||||
|
||||
// Add new relays from the new list
|
||||
relaysToAdd.forEach { url in
|
||||
guard let descriptor = newRelayList.first(where: { $0.url == url }) else { return }
|
||||
add_new_relay(
|
||||
model_cache: delegate.relayModelCache,
|
||||
relay_filters: delegate.relayFilters,
|
||||
pool: pool,
|
||||
descriptor: descriptor,
|
||||
new_relay_filters: new_relay_filters,
|
||||
logging_enabled: delegate.developerMode
|
||||
)
|
||||
changed = true
|
||||
// Add new relays from the new list
|
||||
relaysToAdd.forEach { url in
|
||||
guard let descriptor = newRelayList.first(where: { $0.url == url }) else { return }
|
||||
taskGroup.addTask(operation: {
|
||||
await add_new_relay(
|
||||
model_cache: self.delegate.relayModelCache,
|
||||
relay_filters: self.delegate.relayFilters,
|
||||
pool: self.pool,
|
||||
descriptor: descriptor,
|
||||
new_relay_filters: new_relay_filters,
|
||||
logging_enabled: self.delegate.developerMode
|
||||
)
|
||||
})
|
||||
changed = true
|
||||
}
|
||||
|
||||
for await value in taskGroup { continue }
|
||||
}
|
||||
|
||||
// Always tell RelayPool to connect whether or not we are already connected.
|
||||
// This is because:
|
||||
// 1. RelayPool's internal checks prevent it from redoing connections that already exist
|
||||
// 2. Even if the relay list has not changed, relays may have been disconnected due to app lifecycle or other events
|
||||
await pool.connect()
|
||||
|
||||
if changed {
|
||||
pool.connect()
|
||||
notify(.relays_changed)
|
||||
}
|
||||
}
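// Sketch (not part of the file above) of the set arithmetic `apply(newRelayList:)` uses to
// decide which relays to add and remove. Plain strings stand in for `RelayURL` here.
let currentRelayURLs: Set<String> = ["wss://relay.a", "wss://relay.b"]
let newRelayURLs: Set<String>     = ["wss://relay.b", "wss://relay.c"]

let relaysToRemove = currentRelayURLs.subtracting(newRelayURLs) // ["wss://relay.a"]
let relaysToAdd    = newRelayURLs.subtracting(currentRelayURLs) // ["wss://relay.c"]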
|
||||
@@ -280,8 +292,8 @@ fileprivate extension NIP65.RelayList {
|
||||
/// - descriptor: The description of the relay being added
|
||||
/// - new_relay_filters: Whether to insert new relay filters
|
||||
/// - logging_enabled: Whether logging is enabled
|
||||
fileprivate func add_new_relay(model_cache: RelayModelCache, relay_filters: RelayFilters, pool: RelayPool, descriptor: RelayPool.RelayDescriptor, new_relay_filters: Bool, logging_enabled: Bool) {
|
||||
try? pool.add_relay(descriptor)
|
||||
fileprivate func add_new_relay(model_cache: RelayModelCache, relay_filters: RelayFilters, pool: RelayPool, descriptor: RelayPool.RelayDescriptor, new_relay_filters: Bool, logging_enabled: Bool) async {
|
||||
try? await pool.add_relay(descriptor)
|
||||
let url = descriptor.url
|
||||
|
||||
let relay_id = url
|
||||
@@ -299,7 +311,7 @@ fileprivate func add_new_relay(model_cache: RelayModelCache, relay_filters: Rela
|
||||
model_cache.insert(model: model)
|
||||
|
||||
if logging_enabled {
|
||||
pool.setLog(model.log, for: relay_id)
|
||||
Task { await pool.setLog(model.log, for: relay_id) }
|
||||
}
|
||||
|
||||
// if this is the first time adding filters, we should filter non-paid relays
|
||||
@@ -12,7 +12,7 @@ enum NoteContent {
|
||||
case content(String, TagsSequence?)
|
||||
|
||||
init(note: NostrEvent, keypair: Keypair) {
|
||||
if note.known_kind == .deprecated_dm || note.known_kind == .highlight {
|
||||
if note.known_kind == .dm || note.known_kind == .highlight {
|
||||
self = .content(note.get_content(keypair), note.tags)
|
||||
} else {
|
||||
self = .note(note)
|
||||
@@ -20,45 +20,6 @@ enum NoteContent {
|
||||
}
|
||||
}
|
||||
|
||||
func parsed_blocks_finish(bs: inout note_blocks, tags: TagsSequence?) -> Blocks {
|
||||
var out: [Block] = []
|
||||
|
||||
var i = 0
|
||||
while (i < bs.num_blocks) {
|
||||
let block = bs.blocks[i]
|
||||
|
||||
if let converted = Block(block, tags: tags) {
|
||||
out.append(converted)
|
||||
}
|
||||
|
||||
i += 1
|
||||
}
|
||||
|
||||
let words = Int(bs.words)
|
||||
blocks_free(&bs)
|
||||
|
||||
return Blocks(words: words, blocks: out)
|
||||
|
||||
}
|
||||
|
||||
func parse_note_content(content: NoteContent) -> Blocks {
|
||||
var bs = note_blocks()
|
||||
bs.num_blocks = 0;
|
||||
|
||||
blocks_init(&bs)
|
||||
|
||||
switch content {
|
||||
case .content(let s, let tags):
|
||||
return s.withCString { cptr in
|
||||
damus_parse_content(&bs, cptr)
|
||||
return parsed_blocks_finish(bs: &bs, tags: tags)
|
||||
}
|
||||
case .note(let note):
|
||||
damus_parse_content(&bs, note.content_raw)
|
||||
return parsed_blocks_finish(bs: &bs, tags: note.tags)
|
||||
}
|
||||
}
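// Illustrative usage (not part of the change above) of the parser; `render(_:)` is a
// placeholder name, not a real Damus API:
//
//     let blocks = parse_note_content(content: .content("gm nostr", nil))
//     for block in blocks.blocks {
//         render(block)
//     }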
|
||||
|
||||
func interpret_event_refs(tags: TagsSequence) -> ThreadReply? {
|
||||
// migration is long over, let's just do this to fix tests
|
||||
return interpret_event_refs_ndb(tags: tags)
|
||||
@@ -18,22 +18,42 @@ enum MentionType: AsciiCharacter, TagKey {
|
||||
}
|
||||
}
|
||||
|
||||
enum MentionRef: TagKeys, TagConvertible, Equatable, Hashable {
|
||||
case pubkey(Pubkey)
|
||||
case note(NoteId)
|
||||
case nevent(NEvent)
|
||||
case nprofile(NProfile)
|
||||
case nrelay(String)
|
||||
case naddr(NAddr)
|
||||
extension UnsafePointer<UInt8> {
|
||||
func as_data(size: Int) -> Data {
|
||||
return Data(bytes: self, count: size)
|
||||
}
|
||||
}
|
||||
|
||||
struct MentionRef: TagKeys, TagConvertible, Equatable, Hashable {
|
||||
let nip19: Bech32Object
|
||||
|
||||
static func pubkey(_ pubkey: Pubkey) -> MentionRef {
|
||||
self.init(nip19: .npub(pubkey))
|
||||
}
|
||||
|
||||
static func note(_ note_id: NoteId) -> MentionRef {
|
||||
return self.init(nip19: .note(note_id))
|
||||
}
|
||||
|
||||
init?(block: ndb_mention_bech32_block) {
|
||||
guard let bech32_obj = Bech32Object.init(block: block) else {
|
||||
return nil
|
||||
}
|
||||
self.nip19 = bech32_obj
|
||||
}
|
||||
|
||||
init(nip19: Bech32Object) {
|
||||
self.nip19 = nip19
|
||||
}
|
||||
|
||||
var key: MentionType {
|
||||
switch self {
|
||||
case .pubkey: return .p
|
||||
case .note: return .e
|
||||
case .nevent: return .e
|
||||
case .nprofile: return .p
|
||||
switch self.nip19 {
|
||||
case .note, .nevent: return .e
|
||||
case .nprofile, .npub: return .p
|
||||
case .nrelay: return .r
|
||||
case .naddr: return .a
|
||||
case .nscript: return .a
|
||||
case .nsec: return .p
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,33 +61,64 @@ enum MentionRef: TagKeys, TagConvertible, Equatable, Hashable {
|
||||
return Bech32Object.encode(toBech32Object())
|
||||
}
|
||||
|
||||
static func from_bech32(str: String) -> MentionRef? {
|
||||
switch Bech32Object.parse(str) {
|
||||
case .note(let noteid): return .note(noteid)
|
||||
case .npub(let pubkey): return .pubkey(pubkey)
|
||||
default: return nil
|
||||
init?(bech32_str: String) {
|
||||
guard let obj = Bech32Object.parse(bech32_str) else {
|
||||
return nil
|
||||
}
|
||||
|
||||
self.nip19 = obj
|
||||
}
|
||||
|
||||
var pubkey: Pubkey? {
|
||||
switch self {
|
||||
case .pubkey(let pubkey): return pubkey
|
||||
switch self.nip19 {
|
||||
case .npub(let pubkey): return pubkey
|
||||
case .note: return nil
|
||||
case .nevent(let nevent): return nevent.author
|
||||
case .nprofile(let nprofile): return nprofile.author
|
||||
case .nrelay: return nil
|
||||
case .naddr: return nil
|
||||
case .nsec(let prv): return privkey_to_pubkey(privkey: prv)
|
||||
case .nscript(_): return nil
|
||||
}
|
||||
}
|
||||
|
||||
var tag: [String] {
|
||||
switch self {
|
||||
case .pubkey(let pubkey): return ["p", pubkey.hex()]
|
||||
switch self.nip19 {
|
||||
case .npub(let pubkey): return ["p", pubkey.hex()]
|
||||
case .note(let noteId): return ["e", noteId.hex()]
|
||||
case .nevent(let nevent): return ["e", nevent.noteid.hex()]
|
||||
case .nprofile(let nprofile): return ["p", nprofile.author.hex()]
|
||||
case .nevent(let nevent):
|
||||
var tagBuilder = ["e", nevent.noteid.hex()]
|
||||
|
||||
let relay = nevent.relays.first
|
||||
if let author = nevent.author?.hex() {
|
||||
tagBuilder.append(relay?.absoluteString ?? "")
|
||||
tagBuilder.append(author)
|
||||
} else if let relay {
|
||||
tagBuilder.append(relay.absoluteString)
|
||||
}
|
||||
|
||||
return tagBuilder
|
||||
case .nprofile(let nprofile):
|
||||
var tagBuilder = ["p", nprofile.author.hex()]
|
||||
|
||||
if let relay = nprofile.relays.first {
|
||||
tagBuilder.append(relay.absoluteString)
|
||||
}
|
||||
|
||||
return tagBuilder
|
||||
case .nrelay(let url): return ["r", url]
|
||||
case .naddr(let naddr): return ["a", naddr.kind.description + ":" + naddr.author.hex() + ":" + naddr.identifier.string()]
|
||||
case .naddr(let naddr):
|
||||
var tagBuilder = ["a", "\(naddr.kind.description):\(naddr.author.hex()):\(naddr.identifier.string())"]
|
||||
|
||||
if let relay = naddr.relays.first {
|
||||
tagBuilder.append(relay.absoluteString)
|
||||
}
|
||||
|
||||
return tagBuilder
|
||||
case .nsec(_):
|
||||
return []
|
||||
case .nscript(_):
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,10 +138,10 @@ enum MentionRef: TagKeys, TagConvertible, Equatable, Hashable {
|
||||
switch mention_type {
|
||||
case .p:
|
||||
guard let data = element.id() else { return nil }
|
||||
return .pubkey(Pubkey(data))
|
||||
return .init(nip19: .npub(Pubkey(data)))
|
||||
case .e:
|
||||
guard let data = element.id() else { return nil }
|
||||
return .note(NoteId(data))
|
||||
return .init(nip19: .note(NoteId(data)))
|
||||
case .a:
|
||||
let str = element.string()
|
||||
let data = str.split(separator: ":")
|
||||
@@ -99,26 +150,13 @@ enum MentionRef: TagKeys, TagConvertible, Equatable, Hashable {
|
||||
guard let pubkey = Pubkey(hex: String(data[1])) else { return nil }
|
||||
guard let kind = UInt32(data[0]) else { return nil }
|
||||
|
||||
return .naddr(NAddr(identifier: String(data[2]), author: pubkey, relays: [], kind: kind))
|
||||
case .r: return .nrelay(element.string())
|
||||
return .init(nip19: .naddr(NAddr(identifier: String(data[2]), author: pubkey, relays: [], kind: kind)))
|
||||
case .r: return .init(nip19: .nrelay(element.string()))
|
||||
}
|
||||
}
|
||||
|
||||
func toBech32Object() -> Bech32Object {
|
||||
switch self {
|
||||
case .pubkey(let pk):
|
||||
return .npub(pk)
|
||||
case .note(let noteid):
|
||||
return .note(noteid)
|
||||
case .naddr(let naddr):
|
||||
return .naddr(naddr)
|
||||
case .nevent(let nevent):
|
||||
return .nevent(nevent)
|
||||
case .nprofile(let nprofile):
|
||||
return .nprofile(nprofile)
|
||||
case .nrelay(let url):
|
||||
return .nrelay(url)
|
||||
}
|
||||
self.nip19
|
||||
}
|
||||
}
|
||||
|
||||
@@ -160,7 +198,6 @@ struct LightningInvoice<T> {
|
||||
let amount: T
|
||||
let string: String
|
||||
let expiry: UInt64
|
||||
let payment_hash: Data
|
||||
let created_at: UInt64
|
||||
|
||||
var abbreviated: String {
|
||||
@@ -182,14 +219,14 @@ struct LightningInvoice<T> {
|
||||
// avoiding code duplication and utilizing the guarantees acquired from age and testing.
|
||||
// We could also use the C function `parse_invoice`, but it requires extra C bridging logic.
|
||||
// NDBTODO: This may need updating on the nostrdb upgrade.
|
||||
let parsedBlocks = parse_note_content(content: .content(string,nil)).blocks
|
||||
guard let parsedBlocks = parse_note_content(content: .content(string,nil))?.blocks else { return nil }
|
||||
guard parsedBlocks.count == 1 else { return nil }
|
||||
return parsedBlocks[0].asInvoice
|
||||
}
|
||||
}
|
||||
|
||||
func maybe_pointee<T>(_ p: UnsafeMutablePointer<T>!) -> T? {
|
||||
guard p != nil else {
|
||||
func maybe_pointee<T>(_ p: UnsafeMutablePointer<T>?) -> T? {
|
||||
guard let p else {
|
||||
return nil
|
||||
}
|
||||
return p.pointee
|
||||
@@ -257,7 +294,7 @@ func format_msats(_ msat: Int64, locale: Locale = Locale.current) -> String {
|
||||
return String(format: format, locale: locale, sats.decimalValue as NSDecimalNumber, formattedSats)
|
||||
}
|
||||
|
||||
func convert_invoice_description(b11: bolt11) -> InvoiceDescription? {
|
||||
func convert_invoice_description(b11: ndb_invoice) -> InvoiceDescription? {
|
||||
if let desc = b11.description {
|
||||
return .description(String(cString: desc))
|
||||
}
|
||||
@@ -282,3 +319,38 @@ func find_tag_ref(type: String, id: String, tags: [[String]]) -> Int? {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
struct PostTags {
|
||||
let blocks: [Block]
|
||||
let tags: [[String]]
|
||||
}
|
||||
|
||||
/// Converts parsed post blocks into the tags to attach to an outgoing post (mentions, hashtags, and URLs)
|
||||
func make_post_tags(post_blocks: [Block], tags: [[String]]) -> PostTags {
|
||||
var new_tags = tags
|
||||
|
||||
for post_block in post_blocks {
|
||||
switch post_block {
|
||||
case .mention(let mention):
|
||||
switch(mention.ref.nip19) {
|
||||
case .note, .nevent:
|
||||
continue
|
||||
default:
|
||||
break
|
||||
}
|
||||
|
||||
new_tags.append(mention.ref.tag)
|
||||
case .hashtag(let hashtag):
|
||||
new_tags.append(["t", hashtag.lowercased()])
|
||||
case .text: break
|
||||
case .invoice: break
|
||||
case .relay: break
|
||||
case .url(let url):
|
||||
new_tags.append(["r", url.absoluteString])
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return PostTags(blocks: post_blocks, tags: new_tags)
|
||||
}
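A small, illustrative sketch of what make_post_tags produces for a couple of block types; the values below are placeholders and not part of this diff:

// Illustrative only; placeholder values.
// Input blocks: [.hashtag("Nostr"), .url(URL(string: "https://damus.io")!)]
// Tags appended: ["t", "nostr"] and ["r", "https://damus.io"]
// Note/nevent mentions hit the `continue` above, so they add no tags here.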
|
||||
|
||||
@@ -11,8 +11,8 @@ typealias Profile = NdbProfile
|
||||
typealias ProfileKey = UInt64
|
||||
//typealias ProfileRecord = NdbProfileRecord
|
||||
|
||||
class ProfileRecord {
|
||||
let data: NdbProfileRecord
|
||||
struct ProfileRecord: ~Copyable {
|
||||
private let data: NdbProfileRecord // Marked as private to make users access the safer `profile` property
|
||||
|
||||
init(data: NdbProfileRecord, key: ProfileKey) {
|
||||
self.data = data
|
||||
@@ -20,7 +20,11 @@ class ProfileRecord {
|
||||
}
|
||||
|
||||
let profileKey: ProfileKey
|
||||
var profile: Profile? { return data.profile }
|
||||
var profile: Profile? {
|
||||
// Clone the data since `NdbProfile` can be unowned, but does not have `~Copyable` semantics.
|
||||
// This helps ensure the memory safety of this property
|
||||
return data.profile?.clone()
|
||||
}
|
||||
var receivedAt: UInt64 { data.receivedAt }
|
||||
var noteKey: UInt64 { data.noteKey }
|
||||
|
||||
@@ -37,10 +41,7 @@ class ProfileRecord {
|
||||
}
|
||||
|
||||
if addr.contains("@") {
|
||||
// this is a heavy op and is used a lot in views, cache it!
|
||||
let addr = lnaddress_to_lnurl(addr);
|
||||
self._lnurl = addr
|
||||
return addr
|
||||
return lnaddress_to_lnurl(addr)
|
||||
}
|
||||
|
||||
if !addr.lowercased().hasPrefix("lnurl") {
|
||||
@@ -81,6 +82,24 @@ extension NdbProfile {
|
||||
return URL(string: trim)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Clones this object. Useful for creating an owned copy from an unowned profile
|
||||
func clone() -> Self {
|
||||
return NdbProfile(
|
||||
name: self.name,
|
||||
display_name: self.display_name,
|
||||
about: self.about,
|
||||
picture: self.picture,
|
||||
banner: self.banner,
|
||||
website: self.website,
|
||||
lud06: self.lud06,
|
||||
lud16: self.lud16,
|
||||
nip05: self.nip05,
|
||||
damus_donation: self.damus_donation,
|
||||
reactions: self.reactions
|
||||
)
|
||||
}
|
||||
|
||||
init(name: String? = nil, display_name: String? = nil, about: String? = nil, picture: String? = nil, banner: String? = nil, website: String? = nil, lud06: String? = nil, lud16: String? = nil, nip05: String? = nil, damus_donation: Int? = nil, reactions: Bool = true) {
|
||||
|
||||
@@ -309,7 +328,40 @@ func make_ln_url(_ str: String?) -> URL? {
|
||||
return str.flatMap { URL(string: "lightning:" + $0) }
|
||||
}
|
||||
|
||||
import Synchronization
|
||||
|
||||
@available(iOS 18.0, *)
|
||||
class CachedLNAddressConverter {
|
||||
static let shared: CachedLNAddressConverter = .init()
|
||||
|
||||
private let cache: Mutex<[String: String?]> = .init([:]) // Using a mutex here to avoid race conditions without imposing actor isolation requirements.
|
||||
|
||||
func lnaddress_to_lnurl(_ lnaddr: String) -> String? {
|
||||
if let cachedValue = cache.withLock({ $0[lnaddr] }) {
|
||||
return cachedValue
|
||||
}
|
||||
|
||||
let lnurl: String? = compute_lnaddress_to_lnurl(lnaddr)
|
||||
|
||||
cache.withLock({ cache in
|
||||
cache[lnaddr] = .some(lnurl)
|
||||
})
|
||||
return lnurl
|
||||
}
|
||||
}
|
||||
|
||||
func lnaddress_to_lnurl(_ lnaddr: String) -> String? {
|
||||
if #available(iOS 18.0, *) {
|
||||
// This is a heavy op, use a cache if available!
|
||||
return CachedLNAddressConverter.shared.lnaddress_to_lnurl(lnaddr)
|
||||
} else {
|
||||
// Fallback on earlier versions
|
||||
return compute_lnaddress_to_lnurl(lnaddr)
|
||||
}
|
||||
}
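A minimal usage sketch of the wrapper above; the lightning address is a placeholder and the exact output is illustrative only:

// Illustrative only; "tips@example.com" is a placeholder address.
if let lnurl = lnaddress_to_lnurl("tips@example.com") {
    print(lnurl) // a bech32 string starting with "lnurl1"; cached on iOS 18+, computed directly otherwise
}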
|
||||
|
||||
|
||||
func compute_lnaddress_to_lnurl(_ lnaddr: String) -> String? {
|
||||
let parts = lnaddr.split(separator: "@")
|
||||
guard parts.count == 2 else {
|
||||
return nil
|
||||
@@ -322,4 +374,3 @@ func lnaddress_to_lnurl(_ lnaddr: String) -> String? {
|
||||
|
||||
return bech32_encode(hrp: "lnurl", Array(dat))
|
||||
}
|
||||
|
||||
@@ -321,7 +321,7 @@ func sign_id(privkey: String, id: String) -> String {
|
||||
|
||||
// Extra params for custom signing
|
||||
|
||||
var aux_rand = random_bytes(count: 64).bytes
|
||||
var aux_rand = random_bytes(count: 64).byteArray
|
||||
var digest = try! id.bytes
|
||||
|
||||
// API allows for signing variable length messages
|
||||
@@ -334,6 +334,27 @@ func decode_nostr_event(txt: String) -> NostrResponse? {
|
||||
return NostrResponse.owned_from_json(json: txt)
|
||||
}
|
||||
|
||||
func decode_and_verify_nostr_response(txt: String) -> NostrResponse? {
|
||||
guard let response = NostrResponse.owned_from_json(json: txt) else { return nil }
|
||||
guard verify_nostr_response(response: response) == true else { return nil }
|
||||
return response
|
||||
}
|
||||
|
||||
func verify_nostr_response(response: borrowing NostrResponse) -> Bool {
|
||||
switch response {
|
||||
case .event(_, let event):
|
||||
return event.verify()
|
||||
case .notice(_):
|
||||
return true
|
||||
case .eose(_):
|
||||
return true
|
||||
case .ok(_):
|
||||
return true
|
||||
case .auth(_):
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func encode_json<T: Encodable>(_ val: T) -> String? {
|
||||
let encoder = JSONEncoder()
|
||||
encoder.outputFormatting = .withoutEscapingSlashes
|
||||
@@ -448,17 +469,26 @@ func random_bytes(count: Int) -> Data {
|
||||
return Data(bytes: bytes, count: count)
|
||||
}
|
||||
|
||||
func make_boost_event(keypair: FullKeypair, boosted: NostrEvent) -> NostrEvent? {
|
||||
func make_boost_event(keypair: FullKeypair, boosted: NostrEvent, relayURL: RelayURL?) -> NostrEvent? {
|
||||
var tags = Array(boosted.referenced_pubkeys).map({ pk in pk.tag })
|
||||
|
||||
tags.append(["e", boosted.id.hex(), "", "root"])
|
||||
tags.append(["p", boosted.pubkey.hex()])
|
||||
var eTagBuilder = ["e", boosted.id.hex()]
|
||||
var pTagBuilder = ["p", boosted.pubkey.hex()]
|
||||
|
||||
let relayURLString = relayURL?.absoluteString
|
||||
if let relayURLString {
|
||||
pTagBuilder.append(relayURLString)
|
||||
}
|
||||
eTagBuilder.append(contentsOf: [relayURLString ?? "", "root", boosted.pubkey.hex()])
|
||||
|
||||
tags.append(eTagBuilder)
|
||||
tags.append(pTagBuilder)
|
||||
|
||||
let content = event_to_json(ev: boosted)
|
||||
return NostrEvent(content: content, keypair: keypair.to_keypair(), kind: 6, tags: tags)
|
||||
}
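For reference, the shape of the repost tags built above, with placeholder values; when relayURL is nil the relay slot in the e-tag falls back to an empty string and the p-tag stays two elements long:

// Illustrative tag layout (placeholder values):
//   ["e", "<boosted event id hex>", "wss://relay.example.com", "root", "<boosted author pubkey hex>"]
//   ["p", "<boosted author pubkey hex>", "wss://relay.example.com"]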
|
||||
|
||||
func make_like_event(keypair: FullKeypair, liked: NostrEvent, content: String = "🤙") -> NostrEvent? {
|
||||
func make_like_event(keypair: FullKeypair, liked: NostrEvent, content: String = "🤙", relayURL: RelayURL?) -> NostrEvent? {
|
||||
var tags = liked.tags.reduce(into: [[String]]()) { ts, tag in
|
||||
guard tag.count >= 2,
|
||||
(tag[0].matches_char("e") || tag[0].matches_char("p")) else {
|
||||
@@ -467,12 +497,30 @@ func make_like_event(keypair: FullKeypair, liked: NostrEvent, content: String =
|
||||
ts.append(tag.strings())
|
||||
}
|
||||
|
||||
tags.append(["e", liked.id.hex()])
|
||||
tags.append(["p", liked.pubkey.hex()])
|
||||
var eTagBuilder = ["e", liked.id.hex()]
|
||||
var pTagBuilder = ["p", liked.pubkey.hex()]
|
||||
|
||||
let relayURLString = relayURL?.absoluteString
|
||||
if let relayURLString {
|
||||
pTagBuilder.append(relayURLString)
|
||||
}
|
||||
eTagBuilder.append(contentsOf: [relayURLString ?? "", liked.pubkey.hex()])
|
||||
|
||||
tags.append(eTagBuilder)
|
||||
tags.append(pTagBuilder)
|
||||
|
||||
return NostrEvent(content: content, keypair: keypair.to_keypair(), kind: 7, tags: tags)
|
||||
}
|
||||
|
||||
func make_live_chat_event(keypair: FullKeypair, content: String, root: String, dtag: String, relayURL: RelayURL?) -> NostrEvent? {
|
||||
//var tags = Array(boosted.referenced_pubkeys).map({ pk in pk.tag })
|
||||
var aTagBuilder = ["a", "30311:\(root):\(dtag)"]
|
||||
|
||||
var tags: [[String]] = [aTagBuilder]
|
||||
|
||||
return NostrEvent(content: content, keypair: keypair.to_keypair(), kind: 1311, tags: tags)
|
||||
}
|
||||
|
||||
func generate_private_keypair(our_privkey: Privkey, id: NoteId, created_at: UInt32) -> FullKeypair? {
|
||||
let to_hash = our_privkey.hex() + id.hex() + String(created_at)
|
||||
guard let dat = to_hash.data(using: .utf8) else {
|
||||
@@ -500,6 +548,15 @@ func uniq<T: Hashable>(_ xs: [T]) -> [T] {
|
||||
return ys
|
||||
}
|
||||
|
||||
func gather_quote_ids(our_pubkey: Pubkey, from: NostrEvent) -> [RefId] {
|
||||
var ids: [RefId] = [.quote(from.id.quote_id)]
|
||||
if from.pubkey != our_pubkey {
|
||||
ids.append(.pubkey(from.pubkey))
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
|
||||
func gather_reply_ids(our_pubkey: Pubkey, from: NostrEvent) -> [RefId] {
|
||||
var ids: [RefId] = from.referenced_ids.first.map({ ref in [ .event(ref) ] }) ?? []
|
||||
|
||||
@@ -520,14 +577,6 @@ func gather_reply_ids(our_pubkey: Pubkey, from: NostrEvent) -> [RefId] {
|
||||
return ids
|
||||
}
|
||||
|
||||
func gather_quote_ids(our_pubkey: Pubkey, from: NostrEvent) -> [RefId] {
|
||||
var ids: [RefId] = [.quote(from.id.quote_id)]
|
||||
if from.pubkey != our_pubkey {
|
||||
ids.append(.pubkey(from.pubkey))
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
func event_from_json(dat: String) -> NostrEvent? {
|
||||
return NostrEvent.owned_from_json(json: dat)
|
||||
}
|
||||
@@ -746,57 +795,56 @@ func validate_event(ev: NostrEvent) -> ValidationResult {
|
||||
let ctx = secp256k1.Context.raw
|
||||
var xonly_pubkey = secp256k1_xonly_pubkey.init()
|
||||
|
||||
var ev_pubkey = ev.pubkey.id.bytes
|
||||
var ev_pubkey = ev.pubkey.id.byteArray
|
||||
|
||||
var ok = secp256k1_xonly_pubkey_parse(ctx, &xonly_pubkey, &ev_pubkey) != 0
|
||||
if !ok {
|
||||
return .bad_sig
|
||||
}
|
||||
|
||||
var sig = ev.sig.data.bytes
|
||||
var idbytes = id.id.bytes
|
||||
var sig = ev.sig.data.byteArray
|
||||
var idbytes = id.id.byteArray
|
||||
|
||||
ok = secp256k1_schnorrsig_verify(ctx, &sig, &idbytes, 32, &xonly_pubkey) > 0
|
||||
return ok ? .ok : .bad_sig
|
||||
}
|
||||
|
||||
func first_eref_mention(ev: NostrEvent, keypair: Keypair) -> Mention<NoteId>? {
|
||||
let blocks = ev.blocks(keypair).blocks.filter { block in
|
||||
guard case .mention(let mention) = block else {
|
||||
return false
|
||||
func first_eref_mention(ndb: Ndb, ev: NostrEvent, keypair: Keypair) -> Mention<NoteId>? {
|
||||
return try? NdbBlockGroup.borrowBlockGroup(event: ev, using: ndb, and: keypair, borrow: { blockGroup in
|
||||
return blockGroup.forEachBlock({ index, block in
|
||||
switch block {
|
||||
case .mention(let mention):
|
||||
guard let mention = MentionRef(block: mention) else { return .loopContinue }
|
||||
switch mention.nip19 {
|
||||
case .note(let noteId):
|
||||
return .loopReturn(Mention<NoteId>.note(noteId, index: index))
|
||||
case .nevent(let nEvent):
|
||||
return .loopReturn(Mention<NoteId>.note(nEvent.noteid, index: index))
|
||||
default:
|
||||
return .loopContinue
|
||||
}
|
||||
default:
|
||||
return .loopContinue
|
||||
}
|
||||
|
||||
switch mention.ref {
|
||||
case .note, .nevent:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/// MARK: - Preview
|
||||
if let firstBlock = blocks.first,
|
||||
case .mention(let mention) = firstBlock {
|
||||
switch mention.ref {
|
||||
case .note(let note_id):
|
||||
return .note(note_id)
|
||||
case .nevent(let nevent):
|
||||
return .note(nevent.noteid)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func separate_invoices(ev: NostrEvent, keypair: Keypair) -> [Invoice]? {
|
||||
let invoiceBlocks: [Invoice] = ev.blocks(keypair).blocks.reduce(into: []) { invoices, block in
|
||||
guard case .invoice(let invoice) = block else {
|
||||
return
|
||||
}
|
||||
invoices.append(invoice)
|
||||
}
|
||||
return invoiceBlocks.isEmpty ? nil : invoiceBlocks
|
||||
func separate_invoices(ndb: Ndb, ev: NostrEvent, keypair: Keypair) -> [Invoice]? {
|
||||
return try? NdbBlockGroup.borrowBlockGroup(event: ev, using: ndb, and: keypair, borrow: { blockGroup in
|
||||
let invoiceBlocks: [Invoice] = (try? blockGroup.reduce(initialResult: [Invoice](), { index, invoices, block in
|
||||
switch block {
|
||||
case .invoice(let invoice):
|
||||
if let invoice = invoice.as_invoice() {
|
||||
return .loopReturn(invoices + [invoice])
|
||||
}
|
||||
default:
|
||||
break
|
||||
}
|
||||
return .loopContinue
|
||||
})) ?? []
|
||||
return invoiceBlocks.isEmpty ? nil : invoiceBlocks
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -832,4 +880,31 @@ extension NostrEvent {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
#if DEBUG
|
||||
var debugDescription: String {
|
||||
var output = "🔍 NostrEvent Debug Info\n"
|
||||
output += "═══════════════════════════\n"
|
||||
output += "📝 ID: \(id)\n"
|
||||
output += "👤 Pubkey: \(pubkey)\n"
|
||||
output += "📅 Created: \(Date(timeIntervalSince1970: TimeInterval(created_at))) (\(created_at))\n"
|
||||
output += "🏷️ Kind: \(kind) (\(String(describing: known_kind))\n"
|
||||
output += "✍️ Signature: \(sig)\n"
|
||||
output += "📄 Content (\(content.count) chars):\n"
|
||||
output += " \"\(content.prefix(100))\(content.count > 100 ? "..." : "")\"\n"
|
||||
|
||||
output += "\n🏷️ Tags (\(tags.count) total):\n"
|
||||
for (index, tag) in tags.enumerated() {
|
||||
output += " [\(index)]: ["
|
||||
for (tagIndex, tagElem) in tag.enumerated() {
|
||||
if tagIndex > 0 { output += ", " }
|
||||
output += "\"\(tagElem.string())\""
|
||||
}
|
||||
output += "]\n"
|
||||
}
|
||||
|
||||
output += "═══════════════════════════\n"
|
||||
return output
|
||||
}
|
||||
#endif
|
||||
}
|
||||
@@ -13,13 +13,12 @@ enum NostrKind: UInt32, Codable {
|
||||
case metadata = 0
|
||||
case text = 1
|
||||
case contacts = 3
|
||||
case deprecated_dm = 4
|
||||
case dm = 4
|
||||
case delete = 5
|
||||
case boost = 6
|
||||
case like = 7
|
||||
case seal = 13
|
||||
case dm = 14
|
||||
case chat = 42
|
||||
case live_chat = 1311
|
||||
case mute_list = 10000
|
||||
case relay_list = 10002
|
||||
case interest_list = 10015
|
||||
@@ -29,10 +28,11 @@ enum NostrKind: UInt32, Codable {
|
||||
case zap = 9735
|
||||
case zap_request = 9734
|
||||
case highlight = 9802
|
||||
case gift_wrap = 1059
|
||||
case nwc_request = 23194
|
||||
case nwc_response = 23195
|
||||
case http_auth = 27235
|
||||
case live = 30311
|
||||
case status = 30315
|
||||
case contact_card = 30_382
|
||||
case follow_list = 39089
|
||||
}
|
||||
@@ -89,7 +89,7 @@ enum NostrResponse {
|
||||
free(data)
|
||||
return nil
|
||||
}
|
||||
let new_note = note_data.assumingMemoryBound(to: ndb_note.self)
|
||||
let new_note = ndb_note_ptr(ptr: OpaquePointer(note_data))
|
||||
let note = NdbNote(note: new_note, size: Int(len), owned: true, key: nil)
|
||||
|
||||
guard let subid = sized_cstr(cstr: tce.subid, len: tce.subid_len) else {
|
||||
35
damus/Core/Nostr/ProfileObserver.swift
Normal file
@@ -0,0 +1,35 @@
|
||||
//
|
||||
// ProfileObserver.swift
|
||||
// damus
|
||||
//
|
||||
// Created by Daniel D’Aquino on 2025-09-19.
|
||||
//
|
||||
import Combine
|
||||
import Foundation
|
||||
|
||||
@MainActor
|
||||
class ProfileObserver: ObservableObject {
|
||||
private let pubkey: Pubkey
|
||||
private var observerTask: Task<Void, any Error>? = nil
|
||||
private let damusState: DamusState
|
||||
|
||||
init(pubkey: Pubkey, damusState: DamusState) {
|
||||
self.pubkey = pubkey
|
||||
self.damusState = damusState
|
||||
self.watchProfileChanges()
|
||||
}
|
||||
|
||||
private func watchProfileChanges() {
|
||||
observerTask?.cancel()
|
||||
observerTask = Task {
|
||||
for await _ in await damusState.nostrNetwork.profilesManager.streamProfile(pubkey: self.pubkey) {
|
||||
try Task.checkCancellation()
|
||||
DispatchQueue.main.async { self.objectWillChange.send() }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
deinit {
|
||||
observerTask?.cancel()
|
||||
}
|
||||
}
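A hypothetical SwiftUI sketch showing how the observer above could drive a view; the view name, the profile lookup, and the fallback text are assumptions and not part of this diff:

// Hypothetical usage; assumes `import SwiftUI` and an existing DamusState.
struct ProfileNameView: View {
    @StateObject var observer: ProfileObserver // e.g. ProfileObserver(pubkey: pubkey, damusState: damusState)
    let pubkey: Pubkey
    let damusState: DamusState

    var body: some View {
        // ProfileObserver publishes objectWillChange whenever the streamed profile updates,
        // so this lookup re-runs against nostrdb with fresh data.
        let profile = try? damusState.profiles.lookup(id: pubkey)
        Text(profile??.display_name ?? profile??.name ?? "anonymous")
    }
}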
|
||||
@@ -74,31 +74,45 @@ class Profiles {
|
||||
profile_data(pubkey).zapper
|
||||
}
|
||||
|
||||
func lookup_with_timestamp(_ pubkey: Pubkey) -> NdbTxn<ProfileRecord?>? {
|
||||
ndb.lookup_profile(pubkey)
|
||||
func lookup_with_timestamp<T>(_ pubkey: Pubkey, borrow lendingFunction: (_: borrowing ProfileRecord?) throws -> T) throws -> T {
|
||||
return try ndb.lookup_profile(pubkey, borrow: lendingFunction)
|
||||
}
|
||||
|
||||
func lookup_lnurl(_ pubkey: Pubkey) throws -> String? {
|
||||
return try lookup_with_timestamp(pubkey, borrow: { pr in
|
||||
switch pr {
|
||||
case .some(let pr): return pr.lnurl
|
||||
case .none: return nil
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func lookup_by_key(key: ProfileKey) -> NdbTxn<ProfileRecord?>? {
|
||||
ndb.lookup_profile_by_key(key: key)
|
||||
func lookup_by_key<T>(key: ProfileKey, borrow lendingFunction: (_: borrowing ProfileRecord?) throws -> T) throws -> T {
|
||||
return try ndb.lookup_profile_by_key(key: key, borrow: lendingFunction)
|
||||
}
|
||||
|
||||
func search<Y>(_ query: String, limit: Int, txn: NdbTxn<Y>) -> [Pubkey] {
|
||||
ndb.search_profile(query, limit: limit, txn: txn)
|
||||
func search(_ query: String, limit: Int) throws -> [Pubkey] {
|
||||
try ndb.search_profile(query, limit: limit)
|
||||
}
|
||||
|
||||
func lookup(id: Pubkey, txn_name: String? = nil) -> NdbTxn<Profile?>? {
|
||||
guard let txn = ndb.lookup_profile(id, txn_name: txn_name) else {
|
||||
return nil
|
||||
}
|
||||
return txn.map({ pr in pr?.profile })
|
||||
func lookup(id: Pubkey) throws -> Profile? {
|
||||
return try ndb.lookup_profile(id, borrow: { pr in
|
||||
switch pr {
|
||||
case .none:
|
||||
return nil
|
||||
case .some(let profileRecord):
|
||||
// This will clone the value to make it owned and safe to return.
|
||||
return profileRecord.profile
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func lookup_key_by_pubkey(_ pubkey: Pubkey) -> ProfileKey? {
|
||||
ndb.lookup_profile_key(pubkey)
|
||||
func lookup_key_by_pubkey(_ pubkey: Pubkey) throws -> ProfileKey? {
|
||||
try ndb.lookup_profile_key(pubkey)
|
||||
}
|
||||
|
||||
func has_fresh_profile<Y>(id: Pubkey, txn: NdbTxn<Y>) -> Bool {
|
||||
guard let fetched_at = ndb.read_profile_last_fetched(txn: txn, pubkey: id)
|
||||
func has_fresh_profile(id: Pubkey) throws -> Bool {
|
||||
guard let fetched_at = try ndb.read_profile_last_fetched(pubkey: id)
|
||||
else {
|
||||
return false
|
||||
}
|
||||
@@ -9,8 +9,41 @@ import Combine
|
||||
import Foundation
|
||||
|
||||
enum NostrConnectionEvent {
|
||||
case ws_event(WebSocketEvent)
|
||||
/// Other non-message websocket events
|
||||
case ws_connection_event(WSConnectionEvent)
|
||||
/// A nostr response
|
||||
case nostr_event(NostrResponse)
|
||||
|
||||
/// Models non-messaging websocket events
|
||||
///
|
||||
/// Implementation note: Messaging events should use `.nostr_event` in `NostrConnectionEvent`
|
||||
enum WSConnectionEvent {
|
||||
case connected
|
||||
case disconnected(URLSessionWebSocketTask.CloseCode, String?)
|
||||
case error(Error)
|
||||
|
||||
static func from(full_ws_event: WebSocketEvent) -> Self? {
|
||||
switch full_ws_event {
|
||||
case .connected:
|
||||
return .connected
|
||||
case .message(_):
|
||||
return nil
|
||||
case .disconnected(let closeCode, let string):
|
||||
return .disconnected(closeCode, string)
|
||||
case .error(let error):
|
||||
return .error(error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var subId: String? {
|
||||
switch self {
|
||||
case .ws_connection_event(_):
|
||||
return nil
|
||||
case .nostr_event(let event):
|
||||
return event.subid
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
final class RelayConnection: ObservableObject {
|
||||
@@ -24,18 +57,18 @@ final class RelayConnection: ObservableObject {
|
||||
private lazy var socket = WebSocket(relay_url.url)
|
||||
private var subscriptionToken: AnyCancellable?
|
||||
|
||||
private var handleEvent: (NostrConnectionEvent) -> ()
|
||||
private var handleEvent: (NostrConnectionEvent) async -> ()
|
||||
private var processEvent: (WebSocketEvent) -> ()
|
||||
private let relay_url: RelayURL
|
||||
var log: RelayLog?
|
||||
|
||||
init(url: RelayURL,
|
||||
handleEvent: @escaping (NostrConnectionEvent) -> (),
|
||||
processEvent: @escaping (WebSocketEvent) -> ())
|
||||
handleEvent: @escaping (NostrConnectionEvent) async -> (),
|
||||
processUnverifiedWSEvent: @escaping (WebSocketEvent) -> ())
|
||||
{
|
||||
self.relay_url = url
|
||||
self.handleEvent = handleEvent
|
||||
self.processEvent = processEvent
|
||||
self.processEvent = processUnverifiedWSEvent
|
||||
}
|
||||
|
||||
func ping() {
|
||||
@@ -71,12 +104,12 @@ final class RelayConnection: ObservableObject {
|
||||
.sink { [weak self] completion in
|
||||
switch completion {
|
||||
case .failure(let error):
|
||||
self?.receive(event: .error(error))
|
||||
Task { await self?.receive(event: .error(error)) }
|
||||
case .finished:
|
||||
self?.receive(event: .disconnected(.normalClosure, nil))
|
||||
Task { await self?.receive(event: .disconnected(.normalClosure, nil)) }
|
||||
}
|
||||
} receiveValue: { [weak self] event in
|
||||
self?.receive(event: event)
|
||||
Task { await self?.receive(event: event) }
|
||||
}
|
||||
|
||||
socket.connect()
|
||||
@@ -114,7 +147,8 @@ final class RelayConnection: ObservableObject {
|
||||
}
|
||||
}
|
||||
|
||||
private func receive(event: WebSocketEvent) {
|
||||
private func receive(event: WebSocketEvent) async {
|
||||
assert(!Thread.isMainThread, "This code must not be executed on the main thread")
|
||||
processEvent(event)
|
||||
switch event {
|
||||
case .connected:
|
||||
@@ -124,7 +158,7 @@ final class RelayConnection: ObservableObject {
|
||||
self.isConnecting = false
|
||||
}
|
||||
case .message(let message):
|
||||
self.receive(message: message)
|
||||
await self.receive(message: message)
|
||||
case .disconnected(let closeCode, let reason):
|
||||
if closeCode != .normalClosure {
|
||||
Log.error("⚠️ Warning: RelayConnection (%d) closed with code: %s", for: .networking, String(describing: closeCode), String(describing: reason))
|
||||
@@ -151,9 +185,8 @@ final class RelayConnection: ObservableObject {
|
||||
self.reconnect_with_backoff()
|
||||
}
|
||||
}
|
||||
DispatchQueue.main.async {
|
||||
self.handleEvent(.ws_event(event))
|
||||
}
|
||||
guard let ws_connection_event = NostrConnectionEvent.WSConnectionEvent.from(full_ws_event: event) else { return }
|
||||
await self.handleEvent(.ws_connection_event(ws_connection_event))
|
||||
|
||||
if let description = event.description {
|
||||
log?.add(description)
|
||||
@@ -187,19 +220,19 @@ final class RelayConnection: ObservableObject {
|
||||
}
|
||||
}
|
||||
|
||||
private func receive(message: URLSessionWebSocketTask.Message) {
|
||||
private func receive(message: URLSessionWebSocketTask.Message) async {
|
||||
switch message {
|
||||
case .string(let messageString):
|
||||
if let ev = decode_nostr_event(txt: messageString) {
|
||||
DispatchQueue.main.async {
|
||||
self.handleEvent(.nostr_event(ev))
|
||||
}
|
||||
// NOTE: Once we switch to the local relay model,
|
||||
// we will not need to verify nostr events at this point.
|
||||
if let ev = decode_and_verify_nostr_response(txt: messageString) {
|
||||
await self.handleEvent(.nostr_event(ev))
|
||||
return
|
||||
}
|
||||
print("failed to decode event \(messageString)")
|
||||
case .data(let messageData):
|
||||
if let messageString = String(data: messageData, encoding: .utf8) {
|
||||
receive(message: .string(messageString))
|
||||
await receive(message: .string(messageString))
|
||||
}
|
||||
@unknown default:
|
||||
print("An unexpected URLSessionWebSocketTask.Message was received.")
|
||||
560
damus/Core/Nostr/RelayPool.swift
Normal file
@@ -0,0 +1,560 @@
|
||||
//
|
||||
// RelayPool.swift
|
||||
// damus
|
||||
//
|
||||
// Created by William Casarin on 2022-04-11.
|
||||
//
|
||||
|
||||
import Foundation
|
||||
import Network
|
||||
|
||||
struct RelayHandler {
|
||||
let sub_id: String
|
||||
/// The filters that this handler will handle. Set this to `nil` if you want your handler to receive all events coming from the relays.
|
||||
let filters: [NostrFilter]?
|
||||
let to: [RelayURL]?
|
||||
var handler: AsyncStream<(RelayURL, NostrConnectionEvent)>.Continuation
|
||||
}
|
||||
|
||||
struct QueuedRequest {
|
||||
let req: NostrRequestType
|
||||
let relay: RelayURL
|
||||
let skip_ephemeral: Bool
|
||||
}
|
||||
|
||||
struct SeenEvent: Hashable {
|
||||
let relay_id: RelayURL
|
||||
let evid: NoteId
|
||||
}
|
||||
|
||||
/// Establishes and manages connections and subscriptions to a list of relays.
|
||||
actor RelayPool {
|
||||
@MainActor
|
||||
private(set) var relays: [Relay] = []
|
||||
var open: Bool = false
|
||||
var handlers: [RelayHandler] = []
|
||||
var request_queue: [QueuedRequest] = []
|
||||
var seen: [NoteId: Set<RelayURL>] = [:]
|
||||
var counts: [RelayURL: UInt64] = [:]
|
||||
var ndb: Ndb?
|
||||
/// The keypair used to authenticate with relays
|
||||
var keypair: Keypair?
|
||||
var message_received_function: (((String, RelayDescriptor)) -> Void)?
|
||||
var message_sent_function: (((String, Relay)) -> Void)?
|
||||
var delegate: Delegate?
|
||||
private(set) var signal: SignalModel = SignalModel()
|
||||
|
||||
let network_monitor = NWPathMonitor()
|
||||
private let network_monitor_queue = DispatchQueue(label: "io.damus.network_monitor")
|
||||
private var last_network_status: NWPath.Status = .unsatisfied
|
||||
|
||||
/// The limit of maximum concurrent subscriptions. Any subscriptions beyond this limit will be paused until subscriptions clear
|
||||
/// This is to avoid error states and undefined behaviour from hitting subscription limits on the relays: excess subscriptions wait instead, on the principle that slower is not ideal but is better than completely broken.
|
||||
static let MAX_CONCURRENT_SUBSCRIPTION_LIMIT = 14 // This number is only an educated guess based on some local experiments.
|
||||
|
||||
func close() async {
|
||||
await disconnect()
|
||||
await clearRelays()
|
||||
open = false
|
||||
handlers = []
|
||||
request_queue = []
|
||||
await clearSeen()
|
||||
counts = [:]
|
||||
keypair = nil
|
||||
}
|
||||
|
||||
@MainActor
|
||||
private func clearRelays() {
|
||||
relays = []
|
||||
}
|
||||
|
||||
private func clearSeen() {
|
||||
seen.removeAll()
|
||||
}
|
||||
|
||||
init(ndb: Ndb?, keypair: Keypair? = nil) {
|
||||
self.ndb = ndb
|
||||
self.keypair = keypair
|
||||
|
||||
network_monitor.pathUpdateHandler = { [weak self] path in
|
||||
Task { await self?.pathUpdateHandler(path: path) }
|
||||
}
|
||||
network_monitor.start(queue: network_monitor_queue)
|
||||
}
|
||||
|
||||
private func pathUpdateHandler(path: NWPath) async {
|
||||
if (path.status == .satisfied || path.status == .requiresConnection) && self.last_network_status != path.status {
|
||||
await self.connect_to_disconnected()
|
||||
}
|
||||
|
||||
if path.status != self.last_network_status {
|
||||
for relay in await self.relays {
|
||||
relay.connection.log?.add("Network state: \(path.status)")
|
||||
}
|
||||
}
|
||||
|
||||
self.last_network_status = path.status
|
||||
}
|
||||
|
||||
@MainActor
|
||||
var our_descriptors: [RelayDescriptor] {
|
||||
return all_descriptors.filter { d in !d.ephemeral }
|
||||
}
|
||||
|
||||
@MainActor
|
||||
var all_descriptors: [RelayDescriptor] {
|
||||
relays.map { r in r.descriptor }
|
||||
}
|
||||
|
||||
@MainActor
|
||||
var num_connected: Int {
|
||||
return relays.reduce(0) { n, r in n + (r.connection.isConnected ? 1 : 0) }
|
||||
}
|
||||
|
||||
func remove_handler(sub_id: String) {
|
||||
self.handlers = handlers.filter {
|
||||
if $0.sub_id != sub_id {
|
||||
return true
|
||||
}
|
||||
else {
|
||||
$0.handler.finish()
|
||||
return false
|
||||
}
|
||||
}
|
||||
Log.debug("Removing %s handler, current: %d", for: .networking, sub_id, handlers.count)
|
||||
}
|
||||
|
||||
func ping() async {
|
||||
Log.info("Pinging %d relays", for: .networking, await relays.count)
|
||||
for relay in await relays {
|
||||
relay.connection.ping()
|
||||
}
|
||||
}
|
||||
|
||||
func register_handler(sub_id: String, filters: [NostrFilter]?, to relays: [RelayURL]? = nil, handler: AsyncStream<(RelayURL, NostrConnectionEvent)>.Continuation) async {
|
||||
while handlers.count > Self.MAX_CONCURRENT_SUBSCRIPTION_LIMIT {
|
||||
Log.debug("%s: Too many subscriptions, waiting for subscription pool to clear", for: .networking, sub_id)
|
||||
try? await Task.sleep(for: .seconds(1))
|
||||
}
|
||||
Log.debug("%s: Subscription pool cleared", for: .networking, sub_id)
|
||||
handlers = handlers.filter({ handler in
|
||||
if handler.sub_id == sub_id {
|
||||
Log.error("Duplicate handler detected for the same subscription ID. Overriding.", for: .networking)
|
||||
handler.handler.finish()
|
||||
return false
|
||||
}
|
||||
else {
|
||||
return true
|
||||
}
|
||||
})
|
||||
self.handlers.append(RelayHandler(sub_id: sub_id, filters: filters, to: relays, handler: handler))
|
||||
Log.debug("Registering %s handler, current: %d", for: .networking, sub_id, self.handlers.count)
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func remove_relay(_ relay_id: RelayURL) async {
|
||||
var i: Int = 0
|
||||
|
||||
await self.disconnect(to: [relay_id])
|
||||
|
||||
for relay in relays {
|
||||
if relay.id == relay_id {
|
||||
relay.connection.disablePermanently()
|
||||
relays.remove(at: i)
|
||||
break
|
||||
}
|
||||
|
||||
i += 1
|
||||
}
|
||||
}
|
||||
|
||||
func add_relay(_ desc: RelayDescriptor) async throws(RelayError) {
|
||||
let relay_id = desc.url
|
||||
if await get_relay(relay_id) != nil {
|
||||
throw RelayError.RelayAlreadyExists
|
||||
}
|
||||
let conn = RelayConnection(url: desc.url, handleEvent: { event in
|
||||
await self.handle_event(relay_id: relay_id, event: event)
|
||||
}, processUnverifiedWSEvent: { wsev in
|
||||
guard case .message(let msg) = wsev,
|
||||
case .string(let str) = msg
|
||||
else { return }
|
||||
|
||||
let _ = self.ndb?.processEvent(str, originRelayURL: relay_id)
|
||||
self.message_received_function?((str, desc))
|
||||
})
|
||||
let relay = Relay(descriptor: desc, connection: conn)
|
||||
await self.appendRelayToList(relay: relay)
|
||||
}
|
||||
|
||||
@MainActor
|
||||
private func appendRelayToList(relay: Relay) {
|
||||
self.relays.append(relay)
|
||||
}
|
||||
|
||||
func setLog(_ log: RelayLog, for relay_id: RelayURL) async {
|
||||
// add the current network state to the log
|
||||
log.add("Network state: \(network_monitor.currentPath.status)")
|
||||
|
||||
await get_relay(relay_id)?.connection.log = log
|
||||
}
|
||||
|
||||
/// This is used to retry dead connections
|
||||
func connect_to_disconnected() async {
|
||||
for relay in await relays {
|
||||
let c = relay.connection
|
||||
|
||||
let is_connecting = c.isConnecting
|
||||
|
||||
if is_connecting && (Date.now.timeIntervalSince1970 - c.last_connection_attempt) > 5 {
|
||||
print("stale connection detected (\(relay.descriptor.url.absoluteString)). retrying...")
|
||||
relay.connection.reconnect()
|
||||
} else if relay.is_broken || is_connecting || c.isConnected {
|
||||
continue
|
||||
} else {
|
||||
relay.connection.reconnect()
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func reconnect(to targetRelays: [RelayURL]? = nil) async {
|
||||
let relays = await getRelays(targetRelays: targetRelays)
|
||||
for relay in relays {
|
||||
// don't try to reconnect to broken relays
|
||||
relay.connection.reconnect()
|
||||
}
|
||||
}
|
||||
|
||||
func connect(to targetRelays: [RelayURL]? = nil) async {
|
||||
let relays = await getRelays(targetRelays: targetRelays)
|
||||
for relay in relays {
|
||||
relay.connection.connect()
|
||||
}
|
||||
// Mark as open last, to prevent other classes from pulling data before the relays are actually connected
|
||||
open = true
|
||||
}
|
||||
|
||||
func disconnect(to targetRelays: [RelayURL]? = nil) async {
|
||||
// Mark as closed first, to prevent other classes from pulling data while the relays are being disconnected
|
||||
open = false
|
||||
let relays = await getRelays(targetRelays: targetRelays)
|
||||
for relay in relays {
|
||||
relay.connection.disconnect()
|
||||
}
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func getRelays(targetRelays: [RelayURL]? = nil) -> [Relay] {
|
||||
targetRelays.map{ get_relays($0) } ?? self.relays
|
||||
}
|
||||
|
||||
/// Deletes queued up requests that should not persist between app sessions (i.e. when the app goes to background then back to foreground)
|
||||
func cleanQueuedRequestForSessionEnd() {
|
||||
request_queue = request_queue.filter { request in
|
||||
guard case .typical(let typicalRequest) = request.req else { return true }
|
||||
switch typicalRequest {
|
||||
case .subscribe(_):
|
||||
return true
|
||||
case .unsubscribe(_):
|
||||
return false // Do not persist unsubscribe requests, to prevent them from racing against subscribe requests when we come back to the foreground.
|
||||
case .event(_):
|
||||
return true
|
||||
case .auth(_):
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func unsubscribe(sub_id: String, to: [RelayURL]? = nil) async {
|
||||
if to == nil {
|
||||
self.remove_handler(sub_id: sub_id)
|
||||
}
|
||||
await self.send(.unsubscribe(sub_id), to: to)
|
||||
}
|
||||
|
||||
func subscribe(sub_id: String, filters: [NostrFilter], handler: AsyncStream<(RelayURL, NostrConnectionEvent)>.Continuation, to: [RelayURL]? = nil) {
|
||||
Task {
|
||||
await register_handler(sub_id: sub_id, filters: filters, to: to, handler: handler)
|
||||
|
||||
// When the caller specifies no relays, it is implied that the user wants to use the ones in the user relay list. Skip ephemeral relays in that case.
|
||||
// When the caller specifies specific relays, do not skip ephemeral relays to respect the exact list given by the caller.
|
||||
let shouldSkipEphemeralRelays = to == nil ? true : false
|
||||
|
||||
await send(.subscribe(.init(filters: filters, sub_id: sub_id)), to: to, skip_ephemeral: shouldSkipEphemeralRelays)
|
||||
}
|
||||
}
|
||||
|
||||
/// Subscribes to data from the `RelayPool` based on a filter and a list of desired relays.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - filters: The filters specifying the desired content.
|
||||
/// - desiredRelays: The relays to subscribe to. If `nil`, it defaults to the `RelayPool`'s default list
|
||||
/// - eoseTimeout: The maximum time to wait for the EOSE signal before giving up
|
||||
/// - Returns: An async stream that callers can consume with a simple `for await` loop
|
||||
func subscribe(filters: [NostrFilter], to desiredRelays: [RelayURL]? = nil, eoseTimeout: Duration? = nil, id: UUID? = nil) async -> AsyncStream<StreamItem> {
|
||||
let eoseTimeout = eoseTimeout ?? .seconds(5)
|
||||
let desiredRelays = await getRelays(targetRelays: desiredRelays)
|
||||
let startTime = CFAbsoluteTimeGetCurrent()
|
||||
return AsyncStream<StreamItem> { continuation in
|
||||
let id = id ?? UUID()
|
||||
let sub_id = id.uuidString
|
||||
var seenEvents: Set<NoteId> = []
|
||||
var relaysWhoFinishedInitialResults: Set<RelayURL> = []
|
||||
var eoseSent = false
|
||||
let upstreamStream = AsyncStream<(RelayURL, NostrConnectionEvent)> { upstreamContinuation in
|
||||
self.subscribe(sub_id: sub_id, filters: filters, handler: upstreamContinuation, to: desiredRelays.map({ $0.descriptor.url }))
|
||||
}
|
||||
let upstreamStreamingTask = Task {
|
||||
for await (relayUrl, connectionEvent) in upstreamStream {
|
||||
try Task.checkCancellation()
|
||||
switch connectionEvent {
|
||||
case .ws_connection_event(let ev):
|
||||
// Websocket events such as connect/disconnect/error are already handled in `RelayConnection`. Do not perform any handling here.
|
||||
// For the future, perhaps we should abstract away `.ws_connection_event` in `RelayPool`? Seems like something to be handled on the `RelayConnection` layer.
|
||||
break
|
||||
case .nostr_event(let nostrResponse):
|
||||
guard nostrResponse.subid == sub_id else { return } // Do not stream items that do not belong in this subscription
|
||||
switch nostrResponse {
|
||||
case .event(_, let nostrEvent):
|
||||
if seenEvents.contains(nostrEvent.id) { break } // Don't send two of the same events.
|
||||
continuation.yield(with: .success(.event(nostrEvent)))
|
||||
seenEvents.insert(nostrEvent.id)
|
||||
case .notice(let note):
|
||||
break // We do not support handling these yet
|
||||
case .eose(_):
|
||||
relaysWhoFinishedInitialResults.insert(relayUrl)
|
||||
let desiredAndConnectedRelays = desiredRelays.filter({ $0.connection.isConnected }).map({ $0.descriptor.url })
|
||||
Log.debug("RelayPool subscription %s: EOSE from %s. EOSE count: %d/%d. Elapsed: %.2f seconds.", for: .networking, id.uuidString, relayUrl.absoluteString, relaysWhoFinishedInitialResults.count, Set(desiredAndConnectedRelays).count, CFAbsoluteTimeGetCurrent() - startTime)
|
||||
if relaysWhoFinishedInitialResults == Set(desiredAndConnectedRelays) {
|
||||
continuation.yield(with: .success(.eose))
|
||||
eoseSent = true
|
||||
}
|
||||
case .ok(_): break // No need to handle this, we are not sending an event to the relay
|
||||
case .auth(_): break // Handled in a separate function in RelayPool
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let timeoutTask = Task {
|
||||
try? await Task.sleep(for: eoseTimeout)
|
||||
if !eoseSent { continuation.yield(with: .success(.eose)) }
|
||||
}
|
||||
continuation.onTermination = { @Sendable termination in
|
||||
switch termination {
|
||||
case .finished:
|
||||
Log.debug("RelayPool subscription %s finished. Closing.", for: .networking, sub_id)
|
||||
case .cancelled:
|
||||
Log.debug("RelayPool subscription %s cancelled. Closing.", for: .networking, sub_id)
|
||||
@unknown default:
|
||||
break
|
||||
}
|
||||
Task {
|
||||
await self.unsubscribe(sub_id: sub_id, to: desiredRelays.map({ $0.descriptor.url }))
|
||||
await self.remove_handler(sub_id: sub_id)
|
||||
}
|
||||
timeoutTask.cancel()
|
||||
upstreamStreamingTask.cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum StreamItem {
|
||||
/// A Nostr event
|
||||
case event(NostrEvent)
|
||||
/// The "end of stored events" signal
|
||||
case eose
|
||||
}
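To make the stream contract above concrete, a hypothetical consumer sketch; `pool` stands for an existing RelayPool instance and the filter value is a placeholder, not part of this diff:

// Hypothetical consumer (illustrative only):
//
//     let stream = await pool.subscribe(filters: [NostrFilter(kinds: [.text])])
//     for await item in stream {
//         switch item {
//         case .event(let event):
//             print("received note \(event.id.hex())")
//         case .eose:
//             break // stored results are in; later events keep streaming live
//         }
//     }
//
// Exiting the loop (or cancelling the surrounding task) terminates the stream,
// which fires `onTermination` above and unsubscribes from the relays.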
|
||||
|
||||
func subscribe_to(sub_id: String, filters: [NostrFilter], to: [RelayURL]?, handler: AsyncStream<(RelayURL, NostrConnectionEvent)>.Continuation) {
|
||||
Task {
|
||||
await register_handler(sub_id: sub_id, filters: filters, to: to, handler: handler)
|
||||
|
||||
await send(.subscribe(.init(filters: filters, sub_id: sub_id)), to: to)
|
||||
}
|
||||
}
|
||||
|
||||
func count_queued(relay: RelayURL) -> Int {
|
||||
var c = 0
|
||||
for request in request_queue {
|
||||
if request.relay == relay {
|
||||
c += 1
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func queue_req(r: NostrRequestType, relay: RelayURL, skip_ephemeral: Bool) {
|
||||
let count = count_queued(relay: relay)
|
||||
guard count <= 10 else {
|
||||
print("can't queue, too many queued events for \(relay)")
|
||||
return
|
||||
}
|
||||
|
||||
print("queueing request for \(relay)")
|
||||
request_queue.append(QueuedRequest(req: r, relay: relay, skip_ephemeral: skip_ephemeral))
|
||||
}
|
||||
|
||||
func send_raw_to_local_ndb(_ req: NostrRequestType) {
|
||||
// send to local relay (nostrdb)
|
||||
switch req {
|
||||
case .typical(let r):
|
||||
if case .event = r, let rstr = make_nostr_req(r) {
|
||||
let _ = ndb?.process_client_event(rstr)
|
||||
}
|
||||
case .custom(let string):
|
||||
let _ = ndb?.process_client_event(string)
|
||||
}
|
||||
}
|
||||
|
||||
func send_raw(_ req: NostrRequestType, to: [RelayURL]? = nil, skip_ephemeral: Bool = true) async {
|
||||
let relays = await getRelays(targetRelays: to)
|
||||
|
||||
self.send_raw_to_local_ndb(req) // Always send Nostr events and data to NostrDB for a local copy
|
||||
|
||||
for relay in relays {
|
||||
if req.is_read && !(relay.descriptor.info.canRead) {
|
||||
continue // Do not send read requests to relays that are not READ relays
|
||||
}
|
||||
|
||||
if req.is_write && !(relay.descriptor.info.canWrite) {
|
||||
continue // Do not send write requests to relays that are not WRITE relays
|
||||
}
|
||||
|
||||
if relay.descriptor.ephemeral && skip_ephemeral {
|
||||
continue // Do not send requests to ephemeral relays if we want to skip them
|
||||
}
|
||||
|
||||
guard relay.connection.isConnected else {
|
||||
Task { await queue_req(r: req, relay: relay.id, skip_ephemeral: skip_ephemeral) }
|
||||
continue
|
||||
}
|
||||
|
||||
relay.connection.send(req, callback: { str in
|
||||
self.message_sent_function?((str, relay))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func send(_ req: NostrRequest, to: [RelayURL]? = nil, skip_ephemeral: Bool = true) async {
|
||||
await send_raw(.typical(req), to: to, skip_ephemeral: skip_ephemeral)
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func get_relays(_ ids: [RelayURL]) -> [Relay] {
|
||||
// don't include ephemeral relays in the default list to query
|
||||
relays.filter { ids.contains($0.id) }
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func get_relay(_ id: RelayURL) -> Relay? {
|
||||
relays.first(where: { $0.id == id })
|
||||
}
|
||||
|
||||
func run_queue(_ relay_id: RelayURL) {
|
||||
self.request_queue = request_queue.reduce(into: Array<QueuedRequest>()) { (q, req) in
|
||||
guard req.relay == relay_id else {
|
||||
q.append(req)
|
||||
return
|
||||
}
|
||||
|
||||
print("running queueing request: \(req.req) for \(relay_id)")
|
||||
Task { await self.send_raw(req.req, to: [relay_id], skip_ephemeral: false) }
|
||||
}
|
||||
}
|
||||
|
||||
func record_seen(relay_id: RelayURL, event: NostrConnectionEvent) {
|
||||
if case .nostr_event(let ev) = event {
|
||||
if case .event(_, let nev) = ev {
|
||||
if seen[nev.id]?.contains(relay_id) == true {
|
||||
return
|
||||
}
|
||||
seen[nev.id, default: Set()].insert(relay_id)
|
||||
counts[relay_id, default: 0] += 1
|
||||
notify(.update_stats(note_id: nev.id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func resubscribeAll(relayId: RelayURL) async {
|
||||
for handler in self.handlers {
|
||||
guard let filters = handler.filters else { continue }
|
||||
// When the caller specifies no relays, it is implied that the user wants to use the ones in the user relay list. Skip ephemeral relays in that case.
|
||||
// When the caller specifies specific relays, do not skip ephemeral relays to respect the exact list given by the caller.
|
||||
let shouldSkipEphemeralRelays = handler.to == nil ? true : false
|
||||
|
||||
if let handlerTargetRelays = handler.to,
|
||||
!handlerTargetRelays.contains(where: { $0 == relayId }) {
|
||||
// Not part of the target relays, skip
|
||||
continue
|
||||
}
|
||||
|
||||
Log.debug("%s: Sending resubscribe request to %s", for: .networking, handler.sub_id, relayId.absoluteString)
|
||||
await send(.subscribe(.init(filters: filters, sub_id: handler.sub_id)), to: [relayId], skip_ephemeral: shouldSkipEphemeralRelays)
|
||||
}
|
||||
}
|
||||
|
||||
func handle_event(relay_id: RelayURL, event: NostrConnectionEvent) async {
|
||||
record_seen(relay_id: relay_id, event: event)
|
||||
|
||||
// When we reconnect, do two things
|
||||
// - Send messages that were stored in the queue
|
||||
// - Re-subscribe to filters we had subscribed before
|
||||
if case .ws_connection_event(let ws) = event {
|
||||
if case .connected = ws {
|
||||
run_queue(relay_id)
|
||||
await self.resubscribeAll(relayId: relay_id)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle auth
|
||||
if case let .nostr_event(nostrResponse) = event,
|
||||
case let .auth(challenge_string) = nostrResponse {
|
||||
if let relay = await get_relay(relay_id) {
|
||||
print("received auth request from \(relay.descriptor.url.id)")
|
||||
relay.authentication_state = .pending
|
||||
if let keypair {
|
||||
if let fullKeypair = keypair.to_full() {
|
||||
if let authRequest = make_auth_request(keypair: fullKeypair, challenge_string: challenge_string, relay: relay) {
|
||||
await send(.auth(authRequest), to: [relay_id], skip_ephemeral: false)
|
||||
relay.authentication_state = .verified
|
||||
} else {
|
||||
print("failed to make auth request")
|
||||
}
|
||||
} else {
|
||||
print("keypair provided did not contain private key, can not sign auth request")
|
||||
relay.authentication_state = .error(.no_private_key)
|
||||
}
|
||||
} else {
|
||||
print("no keypair to reply to auth request")
|
||||
relay.authentication_state = .error(.no_key)
|
||||
}
|
||||
} else {
|
||||
print("no relay found for \(relay_id)")
|
||||
}
|
||||
}
|
||||
|
||||
for handler in handlers {
|
||||
// We send data to the handlers if:
|
||||
// - the subscription ID matches, or
|
||||
// - the handler's filters are `nil`, which is used in some cases as a blanket "give me all notes" (e.g. during signup)
|
||||
guard handler.sub_id == event.subId || handler.filters == nil else { continue }
|
||||
logStreamPipelineStats("RelayPool_\(relay_id.absoluteString)", "RelayPool_Handler_\(handler.sub_id)")
|
||||
handler.handler.yield((relay_id, event))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func add_rw_relay(_ pool: RelayPool, _ url: RelayURL) async {
|
||||
try? await pool.add_relay(RelayPool.RelayDescriptor(url: url, info: .readWrite))
|
||||
}
|
||||
|
||||
|
||||
extension RelayPool {
|
||||
protocol Delegate {
|
||||
func latestRelayListChanged(_ newEvent: NdbNote)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
|
||||
import Foundation
|
||||
|
||||
public struct RelayURL: Hashable, Equatable, Codable, CodingKeyRepresentable, Identifiable, Comparable, CustomStringConvertible {
|
||||
public struct RelayURL: Hashable, Equatable, Codable, CodingKeyRepresentable, Identifiable, Comparable, CustomStringConvertible, Sendable {
|
||||
private(set) var url: URL
|
||||
|
||||
public var id: URL {
|
||||
@@ -9,12 +9,13 @@ import Foundation
|
||||
import LinkPresentation
|
||||
import EmojiPicker
|
||||
|
||||
class DamusState: HeadlessDamusState {
|
||||
class DamusState: HeadlessDamusState, ObservableObject {
|
||||
let keypair: Keypair
|
||||
let likes: EventCounter
|
||||
let boosts: EventCounter
|
||||
let quote_reposts: EventCounter
|
||||
let contacts: Contacts
|
||||
let contactCards: ContactCard
|
||||
let mutelist_manager: MutelistManager
|
||||
let profiles: Profiles
|
||||
let dms: DirectMessagesModel
|
||||
@@ -38,12 +39,14 @@ class DamusState: HeadlessDamusState {
|
||||
let emoji_provider: EmojiProvider
|
||||
let favicon_cache: FaviconCache
|
||||
private(set) var nostrNetwork: NostrNetworkManager
|
||||
var snapshotManager: DatabaseSnapshotManager
|
||||
|
||||
init(keypair: Keypair, likes: EventCounter, boosts: EventCounter, contacts: Contacts, mutelist_manager: MutelistManager, profiles: Profiles, dms: DirectMessagesModel, previews: PreviewCache, zaps: Zaps, lnurls: LNUrls, settings: UserSettingsStore, relay_filters: RelayFilters, relay_model_cache: RelayModelCache, drafts: Drafts, events: EventCache, bookmarks: BookmarksManager, replies: ReplyCounter, wallet: WalletModel, nav: NavigationCoordinator, music: MusicController?, video: DamusVideoCoordinator, ndb: Ndb, purple: DamusPurple? = nil, quote_reposts: EventCounter, emoji_provider: EmojiProvider, favicon_cache: FaviconCache) {
|
||||
init(keypair: Keypair, likes: EventCounter, boosts: EventCounter, contacts: Contacts, contactCards: ContactCard, mutelist_manager: MutelistManager, profiles: Profiles, dms: DirectMessagesModel, previews: PreviewCache, zaps: Zaps, lnurls: LNUrls, settings: UserSettingsStore, relay_filters: RelayFilters, relay_model_cache: RelayModelCache, drafts: Drafts, events: EventCache, bookmarks: BookmarksManager, replies: ReplyCounter, wallet: WalletModel, nav: NavigationCoordinator, music: MusicController?, video: DamusVideoCoordinator, ndb: Ndb, purple: DamusPurple? = nil, quote_reposts: EventCounter, emoji_provider: EmojiProvider, favicon_cache: FaviconCache, addNdbToRelayPool: Bool = true) {
|
||||
self.keypair = keypair
|
||||
self.likes = likes
|
||||
self.boosts = boosts
|
||||
self.contacts = contacts
|
||||
self.contactCards = contactCards
|
||||
self.mutelist_manager = mutelist_manager
|
||||
self.profiles = profiles
|
||||
self.dms = dms
|
||||
@@ -72,13 +75,16 @@ class DamusState: HeadlessDamusState {
|
||||
self.favicon_cache = FaviconCache()
|
||||
|
||||
let networkManagerDelegate = NostrNetworkManagerDelegate(settings: settings, contacts: contacts, ndb: ndb, keypair: keypair, relayModelCache: relay_model_cache, relayFilters: relay_filters)
|
||||
self.nostrNetwork = NostrNetworkManager(delegate: networkManagerDelegate)
|
||||
let nostrNetwork = NostrNetworkManager(delegate: networkManagerDelegate, addNdbToRelayPool: addNdbToRelayPool)
|
||||
self.nostrNetwork = nostrNetwork
|
||||
self.wallet.nostrNetwork = nostrNetwork
|
||||
self.snapshotManager = .init(ndb: ndb)
|
||||
}
|
||||
|
||||
@MainActor
|
||||
convenience init?(keypair: Keypair) {
|
||||
convenience init?(keypair: Keypair, owns_db_file: Bool) {
|
||||
// nostrdb
|
||||
var mndb = Ndb()
|
||||
var mndb = Ndb(owns_db_file: owns_db_file)
|
||||
if mndb == nil {
|
||||
// try recovery
|
||||
print("DB ISSUE! RECOVERING")
|
||||
@@ -109,6 +115,7 @@ class DamusState: HeadlessDamusState {
|
||||
likes: EventCounter(our_pubkey: pubkey),
|
||||
boosts: EventCounter(our_pubkey: pubkey),
|
||||
contacts: Contacts(our_pubkey: pubkey),
|
||||
contactCards: ContactCardManager(),
|
||||
mutelist_manager: MutelistManager(user_keypair: keypair),
|
||||
profiles: Profiles(ndb: ndb),
|
||||
dms: home.dms,
|
||||
@@ -122,7 +129,7 @@ class DamusState: HeadlessDamusState {
|
||||
events: EventCache(ndb: ndb),
|
||||
bookmarks: BookmarksManager(pubkey: pubkey),
|
||||
replies: ReplyCounter(our_pubkey: pubkey),
|
||||
wallet: WalletModel(settings: settings),
|
||||
wallet: WalletModel(settings: settings), // nostrNetwork is connected after initialization
|
||||
nav: navigationCoordinator,
|
||||
music: MusicController(onChange: { _ in }),
|
||||
video: DamusVideoCoordinator(),
|
||||
@@ -164,10 +171,13 @@ class DamusState: HeadlessDamusState {
|
||||
try await self.push_notification_client.revoke_token()
|
||||
}
|
||||
wallet.disconnect()
|
||||
nostrNetwork.pool.close()
|
||||
ndb.close()
|
||||
Task {
|
||||
await nostrNetwork.close() // Close ndb streaming tasks before closing ndb to avoid memory errors
|
||||
ndb.close()
|
||||
}
|
||||
}
|
||||
|
||||
@MainActor
|
||||
static var empty: DamusState {
|
||||
let empty_pub: Pubkey = .empty
|
||||
let empty_sec: Privkey = .empty
|
||||
@@ -178,6 +188,7 @@ class DamusState: HeadlessDamusState {
|
||||
likes: EventCounter(our_pubkey: empty_pub),
|
||||
boosts: EventCounter(our_pubkey: empty_pub),
|
||||
contacts: Contacts(our_pubkey: empty_pub),
|
||||
contactCards: ContactCardManagerMock(),
|
||||
mutelist_manager: MutelistManager(user_keypair: kp),
|
||||
profiles: Profiles(ndb: .empty),
|
||||
dms: DirectMessagesModel(our_pubkey: empty_pub),
|
||||
@@ -216,9 +227,11 @@ fileprivate extension DamusState {
|
||||
set { self.settings.latestRelayListEventIdHex = newValue }
|
||||
}
|
||||
|
||||
@MainActor
|
||||
var latestContactListEvent: NostrEvent? { self.contacts.event }
|
||||
var bootstrapRelays: [RelayURL] { get_default_bootstrap_relays() }
|
||||
var developerMode: Bool { self.settings.developer_mode }
|
||||
var experimentalLocalRelayModelSupport: Bool { self.settings.enable_experimental_local_relay_model }
|
||||
var relayModelCache: RelayModelCache
|
||||
var relayFilters: RelayFilters
|
||||
|
||||
damus/Core/Storage/DatabaseSnapshotManager.swift (new file, 195 lines)
@@ -0,0 +1,195 @@
|
||||
//
|
||||
// DatabaseSnapshotManager.swift
|
||||
// damus
|
||||
//
|
||||
// Created on 2025-01-20.
|
||||
//
|
||||
|
||||
import Foundation
|
||||
import OSLog
|
||||
|
||||
/// Manages periodic snapshots of the main NostrDB database to a shared container location.
|
||||
///
|
||||
/// This allows app extensions (like notification service extensions) to access a recent
|
||||
/// read-only copy of the database for enhanced UX, while the main database resides in
|
||||
/// the private container to avoid 0xdead10cc crashes and issues related to holding file locks on shared containers.
|
||||
///
|
||||
/// Snapshots are created periodically while the app is in the foreground, since the database
|
||||
/// only gets updated when the app is active.
|
||||
actor DatabaseSnapshotManager {
|
||||
|
||||
/// Minimum interval between snapshots (in seconds)
|
||||
private static let minimumSnapshotInterval: TimeInterval = 60 * 60 // 1 hour
|
||||
|
||||
/// Key for storing last snapshot timestamp in UserDefaults
|
||||
private static let lastSnapshotDateKey = "lastDatabaseSnapshotDate"
|
||||
|
||||
private let ndb: Ndb
|
||||
private var snapshotTimerTask: Task<Void, Never>? = nil
|
||||
var snapshotTimerTickCount: Int = 0
|
||||
var snapshotCount: Int = 0
|
||||
|
||||
/// Initialize the snapshot manager with a NostrDB instance
|
||||
/// - Parameter ndb: The NostrDB instance to snapshot
|
||||
init(ndb: Ndb) {
|
||||
self.ndb = ndb
|
||||
}
|
||||
|
||||
// MARK: - Periodic tasks management
|
||||
|
||||
/// Start the periodic snapshot timer.
|
||||
///
|
||||
/// This should be called when the app enters the foreground.
|
||||
/// The timer will fire periodically to check if a snapshot is needed.
|
||||
func startPeriodicSnapshots() {
|
||||
// Don't start if already running
|
||||
guard snapshotTimerTask == nil else {
|
||||
Log.debug("Snapshot timer already running", for: .storage)
|
||||
return
|
||||
}
|
||||
|
||||
Log.info("Starting periodic database snapshot timer", for: .storage)
|
||||
|
||||
snapshotTimerTask = Task(priority: .utility) { [weak self] in
|
||||
while !Task.isCancelled {
|
||||
guard let self else { return }
|
||||
Log.debug("Snapshot timer - tick", for: .storage)
|
||||
await self.increaseSnapshotTimerTickCount()
|
||||
do {
|
||||
try await self.createSnapshotIfNeeded()
|
||||
}
|
||||
catch {
|
||||
Log.error("Failed to create snapshot: %{public}@", for: .storage, error.localizedDescription)
|
||||
}
|
||||
try? await Task.sleep(for: .seconds(60 * 5), tolerance: .seconds(10))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop the periodic snapshot timer.
|
||||
///
|
||||
/// This should be called when the app enters the background.
|
||||
func stopPeriodicSnapshots() async {
|
||||
guard snapshotTimerTask != nil else {
|
||||
return
|
||||
}
|
||||
|
||||
Log.info("Stopping periodic database snapshot timer", for: .storage)
|
||||
snapshotTimerTask?.cancel()
|
||||
await snapshotTimerTask?.value
|
||||
snapshotTimerTask = nil
|
||||
}
|
||||
|
||||
|
||||
// MARK: - Snapshotting
|
||||
|
||||
/// Perform a database snapshot if needed.
|
||||
///
|
||||
/// This method checks if enough time has passed since the last snapshot and creates a new one if necessary.
|
||||
@discardableResult
|
||||
func createSnapshotIfNeeded() async throws -> Bool {
|
||||
guard shouldCreateSnapshot() else {
|
||||
Log.debug("Skipping snapshot - minimum interval not yet elapsed", for: .storage)
|
||||
return false
|
||||
}
|
||||
|
||||
try await self.performSnapshot()
|
||||
return true
|
||||
}
|
||||
|
||||
/// Check if a snapshot should be created based on the last snapshot time.
|
||||
private func shouldCreateSnapshot() -> Bool {
|
||||
guard let lastSnapshotDate = UserDefaults.standard.object(forKey: Self.lastSnapshotDateKey) as? Date else {
|
||||
return true // No snapshot has been created yet
|
||||
}
|
||||
|
||||
let timeSinceLastSnapshot = Date().timeIntervalSince(lastSnapshotDate)
|
||||
return timeSinceLastSnapshot >= Self.minimumSnapshotInterval
|
||||
}
|
||||
|
||||
/// Perform the actual snapshot operation.
|
||||
func performSnapshot() async throws {
|
||||
guard let snapshotPath = Ndb.snapshot_db_path else {
|
||||
throw SnapshotError.pathsUnavailable
|
||||
}
|
||||
|
||||
Log.info("Starting nostrdb snapshot to %{public}@", for: .storage, snapshotPath)
|
||||
|
||||
try await copyDatabase(to: snapshotPath)
|
||||
|
||||
// Update the last snapshot date
|
||||
UserDefaults.standard.set(Date(), forKey: Self.lastSnapshotDateKey)
|
||||
|
||||
Log.info("Database snapshot completed successfully", for: .storage)
|
||||
self.snapshotCount += 1
|
||||
}
|
||||
|
||||
/// Copy the database using LMDB's native copy function.
|
||||
private func copyDatabase(to snapshotPath: String) async throws {
|
||||
return try await withCheckedThrowingContinuation { continuation in
|
||||
let fileManager = FileManager.default
|
||||
|
||||
// Delete existing database files at the destination if they exist
|
||||
// LMDB creates multiple files (data.mdb, lock.mdb), so we remove the entire directory
|
||||
if fileManager.fileExists(atPath: snapshotPath) {
|
||||
do {
|
||||
try fileManager.removeItem(atPath: snapshotPath)
|
||||
Log.debug("Removed existing snapshot at %{public}@", for: .storage, snapshotPath)
|
||||
} catch {
|
||||
continuation.resume(throwing: SnapshotError.removeFailed(error))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
Log.debug("Recreate the snapshot directory", for: .storage, snapshotPath)
|
||||
// Recreate the snapshot directory
|
||||
do {
|
||||
try fileManager.createDirectory(atPath: snapshotPath, withIntermediateDirectories: true)
|
||||
} catch {
|
||||
continuation.resume(throwing: SnapshotError.directoryCreationFailed(error))
|
||||
return
|
||||
}
|
||||
|
||||
do {
|
||||
try ndb.snapshot(path: snapshotPath)
|
||||
continuation.resume(returning: ())
|
||||
}
|
||||
catch {
|
||||
continuation.resume(throwing: SnapshotError.copyFailed(error))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Stats functions
|
||||
|
||||
private func increaseSnapshotTimerTickCount() async {
|
||||
self.snapshotTimerTickCount += 1
|
||||
}
|
||||
|
||||
func resetStats() async {
|
||||
self.snapshotTimerTickCount = 0
|
||||
self.snapshotCount = 0
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Error Types
|
||||
|
||||
enum SnapshotError: Error, LocalizedError {
|
||||
case pathsUnavailable
|
||||
case copyFailed(any Error)
|
||||
case removeFailed(Error)
|
||||
case directoryCreationFailed(Error)
|
||||
|
||||
var errorDescription: String? {
|
||||
switch self {
|
||||
case .pathsUnavailable:
|
||||
return "Database paths are not available"
|
||||
case .copyFailed(let error):
return "Failed to copy database: \(error.localizedDescription)"
|
||||
case .removeFailed(let error):
|
||||
return "Failed to remove existing snapshot: \(error.localizedDescription)"
|
||||
case .directoryCreationFailed(let error):
|
||||
return "Failed to create snapshot directory: \(error.localizedDescription)"
|
||||
}
|
||||
}
|
||||
}
|
||||
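The snapshot manager above only runs its timer while it is explicitly started, so the app has to drive it from foreground/background transitions. Below is a minimal wiring sketch, assuming a SwiftUI entry point with access to a DamusState named `damus`; the view names are illustrative and not part of this diff.

// Hedged wiring sketch (not part of this diff). Drives the snapshot timer from
// SwiftUI scene-phase changes, since snapshots are only useful while the app is
// active and nostrdb is being written to.
import SwiftUI

struct SnapshotLifecycleSketch: View {
    @Environment(\.scenePhase) private var scenePhase
    let damus: DamusState

    var body: some View {
        ContentView()   // placeholder for the app's real root view
            .onChange(of: scenePhase) { phase in
                Task {
                    switch phase {
                    case .active:
                        await damus.snapshotManager.startPeriodicSnapshots()
                    case .background, .inactive:
                        await damus.snapshotManager.stopPeriodicSnapshots()
                    @unknown default:
                        break
                    }
                }
            }
    }
}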
@@ -2,22 +2,11 @@
|
||||
// Block.swift
|
||||
// damus
|
||||
//
|
||||
// Created by Kyle Roucis on 2023-08-21.
|
||||
//
|
||||
|
||||
import Foundation
|
||||
|
||||
|
||||
fileprivate extension String {
|
||||
/// Failable initializer to build a Swift.String from a C-backed `str_block_t`.
|
||||
init?(_ s: str_block_t) {
|
||||
let len = s.end - s.start
|
||||
let bytes = Data(bytes: s.start, count: len)
|
||||
self.init(bytes: bytes, encoding: .utf8)
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a block of data stored by the NOSTR protocol. This can be
|
||||
/// Represents a block of data stored in nostrdb. This can be
|
||||
/// simple text, a hashtag, a url, a relay reference, a mention ref and
|
||||
/// potentially more in the future.
|
||||
enum Block: Equatable {
|
||||
@@ -38,22 +27,6 @@ enum Block: Equatable {
|
||||
}
|
||||
}
|
||||
|
||||
var is_previewable: Bool {
|
||||
switch self {
|
||||
case .mention(let m):
|
||||
switch m.ref {
|
||||
case .note, .nevent: return true
|
||||
default: return false
|
||||
}
|
||||
case .invoice:
|
||||
return true
|
||||
case .url:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
case text(String)
|
||||
case mention(Mention<MentionRef>)
|
||||
case hashtag(String)
|
||||
@@ -67,61 +40,56 @@ struct Blocks: Equatable {
|
||||
let blocks: [Block]
|
||||
}
|
||||
|
||||
extension ndb_str_block {
|
||||
func as_str() -> String {
|
||||
let buf = UnsafeBufferPointer(start: self.str, count: Int(self.len))
|
||||
let uint8Buf = buf.map { UInt8(bitPattern: $0) }
|
||||
return String(decoding: uint8Buf, as: UTF8.self)
|
||||
}
|
||||
}
|
||||
|
||||
extension ndb_block_ptr {
|
||||
func as_str() -> String {
|
||||
guard let str_block = ndb_block_str(self.ptr) else {
|
||||
return ""
|
||||
}
|
||||
return str_block.pointee.as_str()
|
||||
}
|
||||
|
||||
var block: ndb_block.__Unnamed_union_block {
|
||||
self.ptr.pointee.block
|
||||
}
|
||||
}
|
||||
|
||||
extension Block {
|
||||
/// Failable initializer for the C-backed type `block_t`. This initializer will inspect
|
||||
/// the underlying block type and build the appropriate enum value as needed.
|
||||
init?(_ block: block_t, tags: TagsSequence? = nil) {
|
||||
switch block.type {
|
||||
init?(block: ndb_block_ptr, tags: TagsSequence?) {
|
||||
switch ndb_get_block_type(block.ptr) {
|
||||
case BLOCK_HASHTAG:
|
||||
guard let str = String(block.block.str) else {
|
||||
return nil
|
||||
}
|
||||
self = .hashtag(str)
|
||||
self = .hashtag(block.as_str())
|
||||
case BLOCK_TEXT:
|
||||
guard let str = String(block.block.str) else {
|
||||
return nil
|
||||
}
|
||||
self = .text(str)
|
||||
self = .text(block.as_str())
|
||||
case BLOCK_MENTION_INDEX:
|
||||
guard let b = Block(index: Int(block.block.mention_index), tags: tags) else {
|
||||
return nil
|
||||
}
|
||||
self = b
|
||||
case BLOCK_URL:
|
||||
guard let b = Block(block.block.str) else {
|
||||
return nil
|
||||
}
|
||||
self = b
|
||||
guard let url = URL(string: block.as_str()) else { return nil }
|
||||
self = .url(url)
|
||||
case BLOCK_INVOICE:
|
||||
guard let b = Block(invoice: block.block.invoice) else {
|
||||
return nil
|
||||
}
|
||||
guard let b = Block(invoice: block.block.invoice) else { return nil }
|
||||
self = b
|
||||
case BLOCK_MENTION_BECH32:
|
||||
guard let b = Block(bech32: block.block.mention_bech32) else {
|
||||
return nil
|
||||
}
|
||||
guard let b = Block(bech32: block.block.mention_bech32) else { return nil }
|
||||
self = b
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
fileprivate extension Block {
|
||||
/// Failable initializer for the C-backed type `str_block_t`.
|
||||
init?(_ b: str_block_t) {
|
||||
guard let str = String(b) else {
|
||||
return nil
|
||||
}
|
||||
|
||||
if let url = URL(string: str) {
|
||||
self = .url(url)
|
||||
}
|
||||
else {
|
||||
self = .text(str)
|
||||
}
|
||||
}
|
||||
}
|
||||
fileprivate extension Block {
|
||||
/// Failable initializer for a block index and a tag sequence.
|
||||
init?(index: Int, tags: TagsSequence? = nil) {
|
||||
@@ -143,34 +111,34 @@ fileprivate extension Block {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fileprivate extension Block {
|
||||
/// Failable initializer for the C-backed type `invoice_block_t`.
|
||||
init?(invoice: invoice_block_t) {
|
||||
guard let invstr = String(invoice.invstr) else {
|
||||
return nil
|
||||
}
|
||||
|
||||
guard var b11 = maybe_pointee(invoice.bolt11) else {
|
||||
return nil
|
||||
}
|
||||
|
||||
guard let description = convert_invoice_description(b11: b11) else {
|
||||
return nil
|
||||
}
|
||||
|
||||
let amount: Amount = maybe_pointee(b11.msat).map { .specific(Int64($0.millisatoshis)) } ?? .any
|
||||
let payment_hash = Data(bytes: &b11.payment_hash, count: 32)
|
||||
let created_at = b11.timestamp
|
||||
|
||||
tal_free(invoice.bolt11)
|
||||
self = .invoice(Invoice(description: description, amount: amount, string: invstr, expiry: b11.expiry, payment_hash: payment_hash, created_at: created_at))
|
||||
init?(invoice: ndb_invoice_block) {
|
||||
|
||||
guard let invoice = invoice_block_as_invoice(invoice) else { return nil }
|
||||
self = .invoice(invoice)
|
||||
}
|
||||
}
|
||||
|
||||
func invoice_block_as_invoice(_ invoice: ndb_invoice_block) -> Invoice? {
|
||||
let invstr = invoice.invstr.as_str()
|
||||
let b11 = invoice.invoice
|
||||
|
||||
guard let description = convert_invoice_description(b11: b11) else {
|
||||
return nil
|
||||
}
|
||||
|
||||
let amount: Amount = b11.amount == 0 ? .any : .specific(Int64(b11.amount))
|
||||
|
||||
return Invoice(description: description, amount: amount, string: invstr, expiry: b11.expiry, created_at: b11.timestamp)
|
||||
|
||||
}
|
||||
|
||||
fileprivate extension Block {
|
||||
/// Failable initializer for the C-backed type `mention_bech32_block_t`. This initializer will inspect the
|
||||
/// bech32 type code and build the appropriate enum type.
|
||||
init?(bech32 b: mention_bech32_block_t) {
|
||||
init?(bech32 b: ndb_mention_bech32_block) {
|
||||
guard let decoded = decodeCBech32(b.bech32) else {
|
||||
return nil
|
||||
}
|
||||
@@ -180,6 +148,7 @@ fileprivate extension Block {
|
||||
self = .mention(.any(ref))
|
||||
}
|
||||
}
|
||||
|
||||
extension Block {
|
||||
var asString: String {
|
||||
switch self {
|
||||
@@ -51,4 +51,15 @@ struct NoteId: IdType, TagKey, TagConvertible {
|
||||
|
||||
return note_id
|
||||
}
|
||||
|
||||
func withUnsafePointer<T>(_ body: (UnsafePointer<UInt8>) throws -> T) rethrows -> T {
|
||||
return try self.id.withUnsafeBytes { (bytes: UnsafeRawBufferPointer) in
|
||||
guard let baseAddress = bytes.baseAddress else {
|
||||
fatalError("Cannot get base address")
|
||||
}
|
||||
return try baseAddress.withMemoryRebound(to: UInt8.self, capacity: bytes.count) { ptr in
|
||||
return try body(ptr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -44,4 +44,14 @@ struct Pubkey: IdType, TagKey, TagConvertible, Identifiable {
|
||||
return pubkey
|
||||
}
|
||||
|
||||
func withUnsafePointer<T>(_ body: (UnsafePointer<UInt8>) throws -> T) rethrows -> T {
|
||||
return try self.id.withUnsafeBytes { (bytes: UnsafeRawBufferPointer) in
|
||||
guard let baseAddress = bytes.baseAddress else {
|
||||
fatalError("Cannot get base address")
|
||||
}
|
||||
return try baseAddress.withMemoryRebound(to: UInt8.self, capacity: bytes.count) { ptr in
|
||||
return try body(ptr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
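The two `withUnsafePointer` helpers added to NoteId and Pubkey above hand the 32 raw id bytes to a closure, which is the shape most nostrdb C calls expect. A small usage sketch, assuming a NoteId value named `some_note_id` (only the `withUnsafePointer` call itself comes from the diff; the surrounding code is illustrative):

// Hedged usage sketch: the pointer is only valid inside the closure, so copy the
// bytes out if they need to outlive it rather than letting the pointer escape.
import Foundation

let note_id: NoteId = some_note_id   // assumed to exist; nostr note ids are 32 bytes
let rawCopy: Data = note_id.withUnsafePointer { ptr in
    Data(bytes: ptr, count: 32)      // safe: the closure runs while the bytes are pinned
}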
@@ -25,12 +25,13 @@ class ActionBarModel: ObservableObject {
|
||||
@Published private(set) var zaps: Int
|
||||
@Published var zap_total: Int64
|
||||
@Published var replies: Int
|
||||
|
||||
@Published var relays: Int
|
||||
|
||||
static func empty() -> ActionBarModel {
|
||||
return ActionBarModel(likes: 0, boosts: 0, zaps: 0, zap_total: 0, replies: 0, our_like: nil, our_boost: nil, our_zap: nil, our_reply: nil)
|
||||
}
|
||||
|
||||
init(likes: Int = 0, boosts: Int = 0, zaps: Int = 0, zap_total: Int64 = 0, replies: Int = 0, our_like: NostrEvent? = nil, our_boost: NostrEvent? = nil, our_zap: Zapping? = nil, our_reply: NostrEvent? = nil, our_quote_repost: NostrEvent? = nil, quote_reposts: Int = 0) {
|
||||
init(likes: Int = 0, boosts: Int = 0, zaps: Int = 0, zap_total: Int64 = 0, replies: Int = 0, our_like: NostrEvent? = nil, our_boost: NostrEvent? = nil, our_zap: Zapping? = nil, our_reply: NostrEvent? = nil, our_quote_repost: NostrEvent? = nil, quote_reposts: Int = 0, relays: Int = 0) {
|
||||
self.likes = likes
|
||||
self.boosts = boosts
|
||||
self.zaps = zaps
|
||||
@@ -42,9 +43,11 @@ class ActionBarModel: ObservableObject {
|
||||
self.our_reply = our_reply
|
||||
self.our_quote_repost = our_quote_repost
|
||||
self.quote_reposts = quote_reposts
|
||||
self.relays = relays
|
||||
}
|
||||
|
||||
func update(damus: DamusState, evid: NoteId) {
|
||||
@MainActor
|
||||
func update(damus: DamusState, evid: NoteId) async {
|
||||
self.likes = damus.likes.counts[evid] ?? 0
|
||||
self.boosts = damus.boosts.counts[evid] ?? 0
|
||||
self.zaps = damus.zaps.event_counts[evid] ?? 0
|
||||
@@ -56,11 +59,12 @@ class ActionBarModel: ObservableObject {
|
||||
self.our_zap = damus.zaps.our_zaps[evid]?.first
|
||||
self.our_reply = damus.replies.our_reply(evid)
|
||||
self.our_quote_repost = damus.quote_reposts.our_events[evid]
|
||||
self.relays = (await damus.nostrNetwork.relayURLsThatSawNote(id: evid) ?? []).count
|
||||
self.objectWillChange.send()
|
||||
}
|
||||
|
||||
var is_empty: Bool {
|
||||
return likes == 0 && boosts == 0 && zaps == 0
|
||||
return likes == 0 && boosts == 0 && zaps == 0 && quote_reposts == 0 && relays == 0
|
||||
}
|
||||
|
||||
var liked: Bool {
|
||||
@@ -36,10 +36,15 @@ struct EventActionBar: View {
|
||||
self.swipe_context = swipe_context
|
||||
}
|
||||
|
||||
var lnurl: String? {
|
||||
damus_state.profiles.lookup_with_timestamp(event.pubkey)?.map({ pr in
|
||||
pr?.lnurl
|
||||
}).value
|
||||
@State var lnurl: String? = nil
|
||||
|
||||
// Fetching an LNURL is expensive enough that it can cause a hitch. Use a special backgroundable function to fetch the value.
|
||||
// Fetch on `.onAppear`
|
||||
nonisolated func fetchLNURL() {
|
||||
let lnurl = try? damus_state.profiles.lookup_lnurl(event.pubkey)
|
||||
DispatchQueue.main.async {
|
||||
self.lnurl = lnurl
|
||||
}
|
||||
}
|
||||
|
||||
var show_like: Bool {
|
||||
@@ -82,8 +87,10 @@ struct EventActionBar: View {
|
||||
|
||||
var like_swipe_button: some View {
|
||||
SwipeAction(image: "shaka", backgroundColor: DamusColors.adaptableGrey) {
|
||||
send_like(emoji: damus_state.settings.default_emoji_reaction)
|
||||
self.swipe_context?.state.wrappedValue = .closed
|
||||
Task {
|
||||
await send_like(emoji: damus_state.settings.default_emoji_reaction)
|
||||
self.swipe_context?.state.wrappedValue = .closed
|
||||
}
|
||||
}
|
||||
.swipeButtonStyle()
|
||||
.accessibilityLabel(NSLocalizedString("React with default reaction emoji", comment: "Accessibility label for react button"))
|
||||
@@ -131,7 +138,7 @@ struct EventActionBar: View {
|
||||
if bar.liked {
|
||||
//notify(.delete, bar.our_like)
|
||||
} else {
|
||||
send_like(emoji: emoji)
|
||||
Task { await send_like(emoji: emoji) }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,32 +183,42 @@ struct EventActionBar: View {
|
||||
let should_hide_repost = hide_items_without_activity && bar.boosts == 0
|
||||
let should_hide_reactions = hide_items_without_activity && bar.likes == 0
|
||||
let zap_model = self.damus_state.events.get_cache_data(self.event.id).zaps_model
|
||||
let should_hide_zap = hide_items_without_activity && zap_model.zap_total > 0
|
||||
let should_hide_zap = hide_items_without_activity && zap_model.zap_total == 0
|
||||
let should_hide_share_button = hide_items_without_activity
|
||||
// Only render the bar if at least one action is visible; avoids empty overlays/dots.
|
||||
let has_any_action = (!should_hide_chat_bubble && damus_state.keypair.privkey != nil)
|
||||
|| !should_hide_repost
|
||||
|| (show_like && !should_hide_reactions)
|
||||
|| (!should_hide_zap && self.lnurl != nil)
|
||||
|| !should_hide_share_button
|
||||
|
||||
return HStack(spacing: options.contains(.no_spread) ? 10 : 0) {
|
||||
if damus_state.keypair.privkey != nil && !should_hide_chat_bubble {
|
||||
self.reply_button
|
||||
}
|
||||
|
||||
if !should_hide_repost {
|
||||
self.space_if_spread
|
||||
self.repost_button
|
||||
}
|
||||
|
||||
if show_like && !should_hide_reactions {
|
||||
self.space_if_spread
|
||||
self.like_button
|
||||
}
|
||||
|
||||
if let lnurl = self.lnurl, !should_hide_zap {
|
||||
self.space_if_spread
|
||||
NoteZapButton(damus_state: damus_state, target: ZapTarget.note(id: event.id, author: event.pubkey), lnurl: lnurl, zaps: zap_model)
|
||||
}
|
||||
|
||||
if !should_hide_share_button {
|
||||
self.space_if_spread
|
||||
self.share_button
|
||||
return Group {
|
||||
if has_any_action {
|
||||
HStack(spacing: options.contains(.no_spread) ? 10 : 0) {
|
||||
if damus_state.keypair.privkey != nil && !should_hide_chat_bubble {
|
||||
self.reply_button
|
||||
}
|
||||
|
||||
if !should_hide_repost {
|
||||
self.space_if_spread
|
||||
self.repost_button
|
||||
}
|
||||
|
||||
if show_like && !should_hide_reactions {
|
||||
self.space_if_spread
|
||||
self.like_button
|
||||
}
|
||||
|
||||
if let lnurl = self.lnurl, !should_hide_zap {
|
||||
self.space_if_spread
|
||||
NoteZapButton(damus_state: damus_state, target: ZapTarget.note(id: event.id, author: event.pubkey), lnurl: lnurl, zaps: zap_model)
|
||||
}
|
||||
|
||||
if !should_hide_share_button {
|
||||
self.space_if_spread
|
||||
self.share_button
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -217,11 +234,31 @@ struct EventActionBar: View {
|
||||
AnyView(self.action_bar_content)
|
||||
}
|
||||
}
|
||||
|
||||
@State var event_relay_url_strings: [RelayURL] = []
|
||||
|
||||
func updateEventRelayURLStrings() async {
|
||||
let newValue = await fetchEventRelayURLStrings()
|
||||
self.event_relay_url_strings = newValue
|
||||
}
|
||||
|
||||
func fetchEventRelayURLStrings() async -> [RelayURL] {
|
||||
let relays = await damus_state.nostrNetwork.relaysForEvent(event: event)
|
||||
if !relays.isEmpty {
|
||||
return relays.prefix(Constants.MAX_SHARE_RELAYS).map { $0 }
|
||||
}
|
||||
|
||||
return userProfile.getCappedRelays()
|
||||
}
|
||||
|
||||
var body: some View {
|
||||
self.content
|
||||
.onAppear {
|
||||
self.bar.update(damus: damus_state, evid: self.event.id)
|
||||
Task.detached(priority: .background, operation: {
|
||||
await self.bar.update(damus: damus_state, evid: self.event.id)
|
||||
self.fetchLNURL()
|
||||
await self.updateEventRelayURLStrings()
|
||||
})
|
||||
}
|
||||
.sheet(isPresented: $show_share_action, onDismiss: { self.show_share_action = false }) {
|
||||
if #available(iOS 16.0, *) {
|
||||
@@ -233,7 +270,9 @@ struct EventActionBar: View {
|
||||
}
|
||||
}
|
||||
.sheet(isPresented: $show_share_sheet, onDismiss: { self.show_share_sheet = false }) {
|
||||
ShareSheet(activityItems: [URL(string: "https://damus.io/" + event.id.bech32)!])
|
||||
if let url = URL(string: "https://damus.io/" + Bech32Object.encode(.nevent(NEvent(event: event, relays: event_relay_url_strings)))) {
|
||||
ShareSheet(activityItems: [url])
|
||||
}
|
||||
}
|
||||
.sheet(isPresented: $show_repost_action, onDismiss: { self.show_repost_action = false }) {
|
||||
|
||||
@@ -247,7 +286,10 @@ struct EventActionBar: View {
|
||||
}
|
||||
.onReceive(handle_notify(.update_stats)) { target in
|
||||
guard target == self.event.id else { return }
|
||||
self.bar.update(damus: self.damus_state, evid: target)
|
||||
Task {
|
||||
await self.bar.update(damus: self.damus_state, evid: target)
|
||||
await self.updateEventRelayURLStrings()
|
||||
}
|
||||
}
|
||||
.onReceive(handle_notify(.liked)) { liked in
|
||||
if liked.id != event.id {
|
||||
@@ -260,9 +302,9 @@ struct EventActionBar: View {
|
||||
}
|
||||
}
|
||||
|
||||
func send_like(emoji: String) {
|
||||
func send_like(emoji: String) async {
|
||||
guard let keypair = damus_state.keypair.to_full(),
|
||||
let like_ev = make_like_event(keypair: keypair, liked: event, content: emoji) else {
|
||||
let like_ev = await make_like_event(keypair: keypair, liked: event, content: emoji, relayURL: damus_state.nostrNetwork.relaysForEvent(event: event).first) else {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -270,7 +312,7 @@ struct EventActionBar: View {
|
||||
|
||||
generator.impactOccurred()
|
||||
|
||||
damus_state.nostrNetwork.postbox.send(like_ev)
|
||||
await damus_state.nostrNetwork.postbox.send(like_ev)
|
||||
}
|
||||
|
||||
// MARK: Helper structures
|
||||
@@ -13,6 +13,7 @@ struct EventDetailBar: View {
|
||||
let target_pk: Pubkey
|
||||
|
||||
@ObservedObject var bar: ActionBarModel
|
||||
@State var relays: [RelayURL] = []
|
||||
|
||||
init(state: DamusState, target: NoteId, target_pk: Pubkey) {
|
||||
self.state = state
|
||||
@@ -59,7 +60,28 @@ struct EventDetailBar: View {
|
||||
}
|
||||
.buttonStyle(PlainButtonStyle())
|
||||
}
|
||||
|
||||
if bar.relays > 0 {
|
||||
NavigationLink(value: Route.UserRelays(relays: relays)) {
|
||||
let nounString = pluralizedString(key: "relays_count", count: bar.relays)
|
||||
let noun = Text(nounString).foregroundColor(.gray)
|
||||
Text("\(Text(verbatim: bar.relays.formatted()).font(.body.bold())) \(noun)", comment: "Sentence composed of 2 variables to describe how many relays a note was found on. In source English, the first variable is the number of relays, and the second variable is 'Relay' or 'Relays'.")
|
||||
}
|
||||
.buttonStyle(PlainButtonStyle())
|
||||
}
|
||||
}
|
||||
.onAppear {
|
||||
Task { await self.updateSeenRelays() }
|
||||
}
|
||||
.onReceive(handle_notify(.update_stats)) { noteId in
|
||||
guard noteId == target else { return }
|
||||
Task { await self.updateSeenRelays() }
|
||||
}
|
||||
}
|
||||
|
||||
func updateSeenRelays() async {
|
||||
let relays = await Array(state.nostrNetwork.relayURLsThatSawNote(id: target) ?? [])
|
||||
self.relays = relays
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,23 @@ struct ShareAction: View {
|
||||
self.userProfile = userProfile
|
||||
self._show_share = show_share
|
||||
}
|
||||
|
||||
@State var event_relay_url_strings: [RelayURL] = []
|
||||
|
||||
func updateEventRelayURLStrings() async {
|
||||
let newValue = await fetchEventRelayURLStrings()
|
||||
self.event_relay_url_strings = newValue
|
||||
}
|
||||
|
||||
func fetchEventRelayURLStrings() async -> [RelayURL] {
|
||||
let relays = await userProfile.damus.nostrNetwork.relaysForEvent(event: event)
|
||||
if !relays.isEmpty {
|
||||
return relays.prefix(Constants.MAX_SHARE_RELAYS).map { $0 }
|
||||
}
|
||||
|
||||
return userProfile.getCappedRelays()
|
||||
}
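Both EventActionBar and ShareAction now carry an identical `fetchEventRelayURLStrings` body, so the capping logic could live in one shared helper. A hedged refactor sketch using only calls that appear in this diff (`relaysForEvent`, `Constants.MAX_SHARE_RELAYS`); the function name and parameter list are assumptions, not part of the change:

// Hedged sketch (not part of this diff): one shared helper for the
// "relays that saw the event, capped, else the profile's capped relays" logic.
func cappedShareRelays(for event: NostrEvent,
                       network: NostrNetworkManager,
                       fallback: [RelayURL]) async -> [RelayURL] {
    let relays = await network.relaysForEvent(event: event)
    guard !relays.isEmpty else { return fallback }
    return relays.prefix(Constants.MAX_SHARE_RELAYS).map { $0 }
}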
|
||||
|
||||
var body: some View {
|
||||
|
||||
VStack {
|
||||
@@ -40,7 +56,7 @@ struct ShareAction: View {
|
||||
|
||||
ShareActionButton(img: "link", text: NSLocalizedString("Copy Link", comment: "Button to copy link to note")) {
|
||||
dismiss()
|
||||
UIPasteboard.general.string = "https://damus.io/" + Bech32Object.encode(.nevent(NEvent(noteid: event.id, relays: userProfile.getCappedRelayStrings())))
|
||||
UIPasteboard.general.string = "https://damus.io/" + Bech32Object.encode(.nevent(NEvent(noteid: event.id, relays: userProfile.getCappedRelays())))
|
||||
}
|
||||
|
||||
let bookmarkImg = isBookmarked ? "bookmark.fill" : "bookmark"
|
||||
@@ -71,8 +87,13 @@ struct ShareAction: View {
|
||||
}
|
||||
}
|
||||
}
|
||||
.onReceive(handle_notify(.update_stats), perform: { noteId in
|
||||
guard noteId == event.id else { return }
|
||||
Task { await self.updateEventRelayURLStrings() }
|
||||
})
|
||||
.onAppear() {
|
||||
userProfile.subscribeToFindRelays()
|
||||
Task { await self.updateEventRelayURLStrings() }
|
||||
}
|
||||
.onDisappear() {
|
||||
userProfile.unsubscribeFindRelays()
|
||||
@@ -57,13 +57,13 @@ struct ReportView: View {
|
||||
.padding()
|
||||
}
|
||||
|
||||
func do_send_report() {
|
||||
func do_send_report() async {
|
||||
guard let selected_report_type,
|
||||
let ev = NostrEvent(content: report_message, keypair: keypair.to_keypair(), kind: 1984, tags: target.reportTags(type: selected_report_type)) else {
|
||||
return
|
||||
}
|
||||
|
||||
postbox.send(ev)
|
||||
await postbox.send(ev)
|
||||
|
||||
report_sent = true
|
||||
report_id = bech32_note_id(ev.id)
|
||||
@@ -116,7 +116,7 @@ struct ReportView: View {
|
||||
|
||||
Section(content: {
|
||||
Button(send_report_button_text) {
|
||||
do_send_report()
|
||||
Task { await do_send_report() }
|
||||
}
|
||||
.disabled(selected_report_type == nil)
|
||||
}, footer: {
|
||||
@@ -19,13 +19,15 @@ struct RepostAction: View {
|
||||
|
||||
Button {
|
||||
dismiss()
|
||||
|
||||
guard let keypair = self.damus_state.keypair.to_full(),
|
||||
let boost = make_boost_event(keypair: keypair, boosted: self.event) else {
|
||||
return
|
||||
|
||||
Task {
|
||||
guard let keypair = self.damus_state.keypair.to_full(),
|
||||
let boost = await make_boost_event(keypair: keypair, boosted: self.event, relayURL: damus_state.nostrNetwork.relaysForEvent(event: self.event).first) else {
|
||||
return
|
||||
}
|
||||
|
||||
await damus_state.nostrNetwork.postbox.send(boost)
|
||||
}
|
||||
|
||||
damus_state.nostrNetwork.postbox.send(boost)
|
||||
} label: {
|
||||
Label(NSLocalizedString("Repost", comment: "Button to repost a note"), image: "repost")
|
||||
.frame(maxWidth: .infinity, minHeight: 50, maxHeight: 50, alignment: .leading)
|
||||
@@ -27,7 +27,7 @@ struct Reposted: View {
|
||||
|
||||
// Show profile picture of the reposter only if the reposter is not the author of the reposted note.
|
||||
if pubkey != target.pubkey {
|
||||
ProfilePicView(pubkey: pubkey, size: eventview_pfp_size(.small), highlight: .none, profiles: damus.profiles, disable_animation: damus.settings.disable_animation)
|
||||
ProfilePicView(pubkey: pubkey, size: eventview_pfp_size(.small), highlight: .none, profiles: damus.profiles, disable_animation: damus.settings.disable_animation, damusState: damus)
|
||||
.onTapGesture {
|
||||
show_profile_action_sheet_if_enabled(damus_state: damus, pubkey: pubkey)
|
||||
}
|
||||
@@ -36,9 +36,24 @@ struct ChatEventView: View {
|
||||
@State var selected_emoji: Emoji?
|
||||
|
||||
@State private var isOnTopHalfOfScreen: Bool = false
|
||||
@ObservedObject var bar: ActionBarModel
|
||||
@StateObject private var bar: ActionBarModel
|
||||
@Environment(\.swipeViewGroupSelection) var swipeViewGroupSelection
|
||||
|
||||
init(event: NostrEvent, selected_event: NostrEvent, prev_ev: NostrEvent?, next_ev: NostrEvent?, damus_state: DamusState, thread: ThreadModel, scroll_to_event: ((_ id: NoteId) -> Void)?, focus_event: (() -> Void)?, highlight_bubble: Bool) {
|
||||
self.event = event
|
||||
self.selected_event = selected_event
|
||||
self.prev_ev = prev_ev
|
||||
self.next_ev = next_ev
|
||||
self.damus_state = damus_state
|
||||
self.thread = thread
|
||||
self.scroll_to_event = scroll_to_event
|
||||
self.focus_event = focus_event
|
||||
self.highlight_bubble = highlight_bubble
|
||||
|
||||
// Initialize @StateObject using wrappedValue
|
||||
_bar = StateObject(wrappedValue: make_actionbar_model(ev: event.id, damus: damus_state))
|
||||
}
|
||||
|
||||
enum PopoverState: String {
|
||||
case closed
|
||||
case open_emoji_selector
|
||||
@@ -83,7 +98,7 @@ struct ChatEventView: View {
|
||||
|
||||
var profile_picture_view: some View {
|
||||
VStack {
|
||||
ProfilePicView(pubkey: event.pubkey, size: 32, highlight: .none, profiles: damus_state.profiles, disable_animation: disable_animation)
|
||||
ProfilePicView(pubkey: event.pubkey, size: 32, highlight: .none, profiles: damus_state.profiles, disable_animation: disable_animation, damusState: damus_state)
|
||||
.onTapGesture {
|
||||
show_profile_action_sheet_if_enabled(damus_state: damus_state, pubkey: event.pubkey)
|
||||
}
|
||||
@@ -100,9 +115,7 @@ struct ChatEventView: View {
|
||||
// MARK: Zapping properties
|
||||
|
||||
var lnurl: String? {
|
||||
damus_state.profiles.lookup_with_timestamp(event.pubkey)?.map({ pr in
|
||||
pr?.lnurl
|
||||
}).value
|
||||
try? damus_state.profiles.lookup_lnurl(event.pubkey)
|
||||
}
|
||||
var zap_target: ZapTarget {
|
||||
ZapTarget.note(id: event.id, author: event.pubkey)
|
||||
@@ -144,7 +157,7 @@ struct ChatEventView: View {
|
||||
let blur_images = should_blur_images(settings: damus_state.settings, contacts: damus_state.contacts, ev: event, our_pubkey: damus_state.pubkey)
|
||||
NoteContentView(damus_state: damus_state, event: event, blur_images: blur_images, size: .normal, options: [.truncate_content])
|
||||
.padding(2)
|
||||
if let mention = first_eref_mention(ev: event, keypair: damus_state.keypair) {
|
||||
if let mention = first_eref_mention(ndb: damus_state.ndb, ev: event, keypair: damus_state.keypair) {
|
||||
MentionView(damus_state: damus_state, mention: mention)
|
||||
.background(DamusColors.adaptableWhite)
|
||||
.clipShape(RoundedRectangle(cornerSize: CGSize(width: 10, height: 10)))
|
||||
@@ -197,8 +210,10 @@ struct ChatEventView: View {
|
||||
}
|
||||
.onChange(of: selected_emoji) { newSelectedEmoji in
|
||||
if let newSelectedEmoji {
|
||||
send_like(emoji: newSelectedEmoji.value)
|
||||
popover_state = .closed
|
||||
Task {
|
||||
await send_like(emoji: newSelectedEmoji.value)
|
||||
popover_state = .closed
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -233,9 +248,9 @@ struct ChatEventView: View {
|
||||
)
|
||||
}
|
||||
|
||||
func send_like(emoji: String) {
|
||||
func send_like(emoji: String) async {
|
||||
guard let keypair = damus_state.keypair.to_full(),
|
||||
let like_ev = make_like_event(keypair: keypair, liked: event, content: emoji) else {
|
||||
let like_ev = make_like_event(keypair: keypair, liked: event, content: emoji, relayURL: await damus_state.nostrNetwork.relaysForEvent(event: event).first) else {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -244,7 +259,7 @@ struct ChatEventView: View {
|
||||
let generator = UIImpactFeedbackGenerator(style: .medium)
|
||||
generator.impactOccurred()
|
||||
|
||||
damus_state.nostrNetwork.postbox.send(like_ev)
|
||||
await damus_state.nostrNetwork.postbox.send(like_ev)
|
||||
}
|
||||
|
||||
var action_bar: some View {
|
||||
@@ -338,21 +353,17 @@ struct ChatEventView: View {
|
||||
}
|
||||
|
||||
#Preview {
|
||||
let bar = make_actionbar_model(ev: test_note.id, damus: test_damus_state)
|
||||
return ChatEventView(event: test_note, selected_event: test_note, prev_ev: nil, next_ev: nil, damus_state: test_damus_state, thread: ThreadModel(event: test_note, damus_state: test_damus_state), scroll_to_event: nil, focus_event: nil, highlight_bubble: false, bar: bar)
|
||||
return ChatEventView(event: test_note, selected_event: test_note, prev_ev: nil, next_ev: nil, damus_state: test_damus_state, thread: ThreadModel(event: test_note, damus_state: test_damus_state), scroll_to_event: nil, focus_event: nil, highlight_bubble: false)
|
||||
}
|
||||
|
||||
#Preview {
|
||||
let bar = make_actionbar_model(ev: test_note.id, damus: test_damus_state)
|
||||
return ChatEventView(event: test_short_note, selected_event: test_note, prev_ev: nil, next_ev: nil, damus_state: test_damus_state, thread: ThreadModel(event: test_note, damus_state: test_damus_state), scroll_to_event: nil, focus_event: nil, highlight_bubble: false, bar: bar)
|
||||
return ChatEventView(event: test_short_note, selected_event: test_note, prev_ev: nil, next_ev: nil, damus_state: test_damus_state, thread: ThreadModel(event: test_note, damus_state: test_damus_state), scroll_to_event: nil, focus_event: nil, highlight_bubble: false)
|
||||
}
|
||||
|
||||
#Preview {
|
||||
let bar = make_actionbar_model(ev: test_note.id, damus: test_damus_state)
|
||||
return ChatEventView(event: test_short_note, selected_event: test_note, prev_ev: nil, next_ev: nil, damus_state: test_damus_state, thread: ThreadModel(event: test_note, damus_state: test_damus_state), scroll_to_event: nil, focus_event: nil, highlight_bubble: true, bar: bar)
|
||||
return ChatEventView(event: test_short_note, selected_event: test_note, prev_ev: nil, next_ev: nil, damus_state: test_damus_state, thread: ThreadModel(event: test_note, damus_state: test_damus_state), scroll_to_event: nil, focus_event: nil, highlight_bubble: true)
|
||||
}
|
||||
|
||||
#Preview {
|
||||
let bar = make_actionbar_model(ev: test_note.id, damus: test_damus_state)
|
||||
return ChatEventView(event: test_super_short_note, selected_event: test_note, prev_ev: nil, next_ev: nil, damus_state: test_damus_state, thread: ThreadModel(event: test_note, damus_state: test_damus_state), scroll_to_event: nil, focus_event: nil, highlight_bubble: false, bar: bar)
|
||||
return ChatEventView(event: test_super_short_note, selected_event: test_note, prev_ev: nil, next_ev: nil, damus_state: test_damus_state, thread: ThreadModel(event: test_note, damus_state: test_damus_state), scroll_to_event: nil, focus_event: nil, highlight_bubble: false)
|
||||
}
|
||||
@@ -23,9 +23,79 @@ struct ChatroomThreadView: View {
|
||||
@State var showStickyHeader: Bool = false
|
||||
@State var untrustedSectionOffset: CGFloat = 0
|
||||
|
||||
// Add state for reading progress (longform articles)
|
||||
@State private var readingProgress: CGFloat = 0
|
||||
@State private var viewportHeight: CGFloat = 0
|
||||
@State private var contentTopY: CGFloat = 0
|
||||
@State private var contentBottomY: CGFloat = 0
|
||||
@State private var initialTopY: CGFloat? = nil
|
||||
|
||||
// Focus mode: auto-hide chrome (nav bar + tab bar) during longform reading
|
||||
@State private var chromeHidden: Bool = false
|
||||
@State private var lastScrollY: CGFloat = 0
|
||||
/// Minimum scroll distance before triggering chrome hide/show
|
||||
private let scrollThreshold: CGFloat = 15
|
||||
|
||||
private static let untrusted_network_section_id = "untrusted-network-section"
|
||||
private static let sticky_header_adjusted_anchor = UnitPoint(x: UnitPoint.top.x, y: 0.2)
|
||||
|
||||
/// Returns true if the selected event is a longform article (kind 30023).
|
||||
var isLongformEvent: Bool {
|
||||
thread.selected_event.kind == 30023
|
||||
}
|
||||
|
||||
/// Updates reading progress based on scroll position.
|
||||
private func updateReadingProgress() {
|
||||
guard thread.selected_event.kind == 30023 else { return }
|
||||
guard viewportHeight > 0 else { return }
|
||||
|
||||
// Capture initial position on first update
|
||||
if initialTopY == nil {
|
||||
initialTopY = contentTopY
|
||||
}
|
||||
guard let startY = initialTopY else { return }
|
||||
|
||||
// Content height is constant (bottom - top in global coords)
|
||||
let contentHeight = contentBottomY - contentTopY
|
||||
guard contentHeight > 0 else { return }
|
||||
|
||||
// How much we've scrolled from initial position
|
||||
// As we scroll down, contentTopY decreases, so scrolled = startY - currentTopY
|
||||
let scrolled = startY - contentTopY
|
||||
let maxScroll = max(contentHeight - viewportHeight, 1)
|
||||
|
||||
let progress = scrolled / maxScroll
|
||||
readingProgress = min(max(progress, 0), 1)
|
||||
}
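A quick worked example of the progress math above, with illustrative numbers (none of these values appear in the diff):

// Illustrative numbers only: article content 3000 pt tall, viewport 800 pt,
// the top tracker started at global y = 100 and has scrolled to y = -1000.
import CoreGraphics

let contentHeight: CGFloat = 3000                       // contentBottomY - contentTopY, constant while scrolling
let viewportHeight: CGFloat = 800
let startY: CGFloat = 100                               // initialTopY captured on the first update
let currentTopY: CGFloat = -1000                        // contentTopY after scrolling down

let scrolled = startY - currentTopY                     // 1100
let maxScroll = max(contentHeight - viewportHeight, 1)  // 2200
let progress = min(max(scrolled / maxScroll, 0), 1)     // 0.5 — halfway through the article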
|
||||
|
||||
/// Updates chrome visibility based on scroll direction (longform only).
|
||||
/// Scrolling down hides chrome; tap to restore (scroll up does not restore).
|
||||
private func updateChromeVisibility(newY: CGFloat) {
|
||||
guard isLongformEvent else { return }
|
||||
|
||||
let delta = newY - lastScrollY
|
||||
|
||||
// Only hide chrome on scroll down, don't restore on scroll up (use tap instead)
|
||||
if delta < -scrollThreshold && !chromeHidden {
|
||||
withAnimation(.easeInOut(duration: 0.25)) {
|
||||
chromeHidden = true
|
||||
}
|
||||
notify(.display_tabbar(false))
|
||||
}
|
||||
|
||||
// Always update lastScrollY to prevent stale delta accumulation
|
||||
lastScrollY = newY
|
||||
}
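And the corresponding numbers for the chrome-hiding rule, again purely illustrative:

// Illustrative numbers only: scrollThreshold is 15 in the code above.
let lastScrollY: CGFloat = 500
let newY: CGFloat = 480
let delta = newY - lastScrollY   // -20: the content moved up, i.e. the user scrolled down
// delta < -scrollThreshold (-20 < -15), so chrome is hidden; a delta of -8 would be ignored.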
|
||||
|
||||
/// Shows chrome (nav bar + tab bar) - called on tap or when leaving view.
|
||||
private func showChrome() {
|
||||
guard chromeHidden else { return }
|
||||
withAnimation(.easeInOut(duration: 0.25)) {
|
||||
chromeHidden = false
|
||||
}
|
||||
notify(.display_tabbar(true))
|
||||
}
|
||||
|
||||
func go_to_event(scroller: ScrollViewProxy, note_id: NoteId) {
|
||||
let adjustedAnchor: UnitPoint = showStickyHeader ? ChatroomThreadView.sticky_header_adjusted_anchor : .top
|
||||
|
||||
@@ -46,7 +116,14 @@ struct ChatroomThreadView: View {
|
||||
}
|
||||
|
||||
func trusted_event_filter(_ event: NostrEvent) -> Bool {
|
||||
!damus.settings.show_trusted_replies_first || damus.contacts.is_in_friendosphere(event.pubkey)
|
||||
// Always trust our own replies; otherwise gate by trusted network when the setting is enabled
|
||||
if event.pubkey == damus.pubkey {
|
||||
return true
|
||||
}
|
||||
if !damus.settings.show_trusted_replies_first {
|
||||
return true
|
||||
}
|
||||
return damus.contacts.is_in_friendosphere(event.pubkey)
|
||||
}
|
||||
|
||||
func ThreadedSwipeViewGroup(scroller: ScrollViewProxy, events: [NostrEvent]) -> some View {
|
||||
@@ -64,8 +141,7 @@ struct ChatroomThreadView: View {
|
||||
focus_event: {
|
||||
self.set_active_event(scroller: scroller, ev: ev)
|
||||
},
|
||||
highlight_bubble: highlighted_note_id == ev.id,
|
||||
bar: make_actionbar_model(ev: ev.id, damus: damus)
|
||||
highlight_bubble: highlighted_note_id == ev.id
|
||||
)
|
||||
.id(ev.id)
|
||||
.matchedGeometryEffect(id: ev.id.hex(), in: animation, anchor: .center)
|
||||
@@ -109,6 +185,22 @@ struct ChatroomThreadView: View {
|
||||
|
||||
ZStack(alignment: .top) {
|
||||
ScrollView(.vertical) {
|
||||
VStack(spacing: 0) {
|
||||
// Top scroll position tracker
|
||||
GeometryReader { geo in
|
||||
Color.clear
|
||||
.onChange(of: geo.frame(in: .global).minY) { newY in
|
||||
contentTopY = newY
|
||||
updateReadingProgress()
|
||||
updateChromeVisibility(newY: newY)
|
||||
}
|
||||
.onAppear {
|
||||
contentTopY = geo.frame(in: .global).minY
|
||||
lastScrollY = geo.frame(in: .global).minY
|
||||
}
|
||||
}
|
||||
.frame(height: 1)
|
||||
|
||||
LazyVStack(alignment: .leading, spacing: 8) {
|
||||
// MARK: - Parents events view
|
||||
ForEach(thread.parent_events, id: \.id) { parent_event in
|
||||
@@ -156,7 +248,8 @@ struct ChatroomThreadView: View {
|
||||
ThreadedSwipeViewGroup(scroller: scroller, events: trusted_events)
|
||||
}
|
||||
}
|
||||
.padding(.top)
|
||||
// Remove top padding for longform articles with sepia to eliminate gap
|
||||
.padding(.top, isLongformEvent && damus.settings.longform_sepia_mode ? 0 : nil)
|
||||
|
||||
// MARK: - Children view - outside trusted network
|
||||
if !untrusted_events.isEmpty {
|
||||
@@ -215,11 +308,41 @@ struct ChatroomThreadView: View {
|
||||
}
|
||||
}
|
||||
|
||||
// Bottom scroll position tracker - placed before EndBlock so we measure article content, not padding
|
||||
GeometryReader { geo in
|
||||
Color.clear
|
||||
.onChange(of: geo.frame(in: .global).minY) { newY in
|
||||
contentBottomY = newY
|
||||
updateReadingProgress()
|
||||
}
|
||||
.onAppear {
|
||||
contentBottomY = geo.frame(in: .global).minY
|
||||
}
|
||||
}
|
||||
.frame(height: 1)
|
||||
|
||||
EndBlock()
|
||||
|
||||
HStack {}
|
||||
.frame(height: tabHeight + getSafeAreaBottom())
|
||||
} // End VStack wrapper
|
||||
}
|
||||
.background(
|
||||
GeometryReader { geo in
|
||||
Color.clear
|
||||
.onAppear {
|
||||
viewportHeight = geo.size.height
|
||||
}
|
||||
.onChange(of: geo.size.height) { newHeight in
|
||||
// Reset baseline on significant height change (orientation, text size)
|
||||
if abs(newHeight - viewportHeight) > 50 {
|
||||
initialTopY = nil
|
||||
}
|
||||
viewportHeight = newHeight
|
||||
updateReadingProgress()
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
if showStickyHeader && !untrusted_events.isEmpty {
|
||||
VStack {
|
||||
@@ -234,6 +357,15 @@ struct ChatroomThreadView: View {
|
||||
.transition(.move(edge: .top).combined(with: .opacity))
|
||||
.zIndex(1)
|
||||
}
|
||||
|
||||
// Reading progress bar - show for longform articles
|
||||
if thread.selected_event.kind == 30023 {
|
||||
VStack(spacing: 0) {
|
||||
ReadingProgressBar(progress: readingProgress)
|
||||
Spacer()
|
||||
}
|
||||
.zIndex(100)
|
||||
}
|
||||
}
|
||||
.onReceive(handle_notify(.post), perform: { notify in
|
||||
switch notify {
|
||||
@@ -251,11 +383,37 @@ struct ChatroomThreadView: View {
|
||||
}
|
||||
.onAppear() {
|
||||
thread.subscribe()
|
||||
scroll_to_event(scroller: scroller, id: thread.selected_event.id, delay: 0.1, animate: false)
|
||||
// Use .top anchor for longform articles so they open at the title,
|
||||
// keep .bottom for regular notes to preserve parent context visibility
|
||||
let anchor: UnitPoint = thread.selected_event.known_kind == .longform ? .top : .bottom
|
||||
scroll_to_event(scroller: scroller, id: thread.selected_event.id, delay: 0.1, animate: false, anchor: anchor)
|
||||
// Ensure chrome is visible when view appears (handles interrupted transitions)
|
||||
if isLongformEvent {
|
||||
chromeHidden = false
|
||||
notify(.display_tabbar(true))
|
||||
}
|
||||
}
|
||||
.onChange(of: thread.selected_event.id) { _ in
|
||||
// Reset reading progress when switching to a different event
|
||||
initialTopY = nil
|
||||
readingProgress = 0
|
||||
// Restore chrome when switching events (user tapped to select)
|
||||
showChrome()
|
||||
}
|
||||
.onDisappear() {
|
||||
thread.unsubscribe()
|
||||
showChrome() // Restore chrome when leaving view
|
||||
}
|
||||
.navigationBarHidden(chromeHidden && isLongformEvent)
|
||||
// Tap anywhere to show chrome when hidden (doesn't block other gestures)
|
||||
.simultaneousGesture(
|
||||
TapGesture()
|
||||
.onEnded { _ in
|
||||
if isLongformEvent && chromeHidden {
|
||||
showChrome()
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -279,3 +437,4 @@ struct ChatroomView_Previews: PreviewProvider {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -56,12 +56,7 @@ class ThreadModel: ObservableObject {
|
||||
/// The damus state, needed to access the relay pool and load the thread events
|
||||
let damus_state: DamusState
|
||||
|
||||
private let profiles_subid = UUID().description
|
||||
private let base_subid = UUID().description
|
||||
private let meta_subid = UUID().description
|
||||
private var subids: [String] {
|
||||
return [profiles_subid, base_subid, meta_subid]
|
||||
}
|
||||
private var listener: Task<Void, Never>?
|
||||
|
||||
|
||||
// MARK: Initialization
|
||||
@@ -86,17 +81,6 @@ class ThreadModel: ObservableObject {
|
||||
|
||||
// MARK: Relay pool subscription management
|
||||
|
||||
/// Unsubscribe from events in the relay pool. Call this when unloading the view
|
||||
func unsubscribe() {
|
||||
self.damus_state.nostrNetwork.pool.remove_handler(sub_id: base_subid)
|
||||
self.damus_state.nostrNetwork.pool.remove_handler(sub_id: meta_subid)
|
||||
self.damus_state.nostrNetwork.pool.remove_handler(sub_id: profiles_subid)
|
||||
self.damus_state.nostrNetwork.pool.unsubscribe(sub_id: base_subid)
|
||||
self.damus_state.nostrNetwork.pool.unsubscribe(sub_id: meta_subid)
|
||||
self.damus_state.nostrNetwork.pool.unsubscribe(sub_id: profiles_subid)
|
||||
Log.info("unsubscribing to thread %s with sub_id %s", for: .render, original_event.id.hex(), base_subid)
|
||||
}
|
||||
|
||||
/// Subscribe to events in this thread. Call this when loading the view.
|
||||
func subscribe() {
|
||||
var meta_events = NostrFilter()
|
||||
@@ -127,10 +111,19 @@ class ThreadModel: ObservableObject {
|
||||
|
||||
let base_filters = [event_filter, ref_events]
|
||||
let meta_filters = [meta_events, quote_events]
|
||||
|
||||
Log.info("subscribing to thread %s with sub_id %s", for: .render, original_event.id.hex(), base_subid)
|
||||
damus_state.nostrNetwork.pool.subscribe(sub_id: base_subid, filters: base_filters, handler: handle_event)
|
||||
damus_state.nostrNetwork.pool.subscribe(sub_id: meta_subid, filters: meta_filters, handler: handle_event)
|
||||
|
||||
self.listener?.cancel()
|
||||
self.listener = Task {
|
||||
Log.info("subscribing to thread %s ", for: .render, original_event.id.hex())
|
||||
for await event in damus_state.nostrNetwork.reader.streamIndefinitely(filters: base_filters + meta_filters) {
|
||||
event.justUseACopy({ handle_event(ev: $0) })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func unsubscribe() {
|
||||
self.listener?.cancel()
|
||||
self.listener = nil
|
||||
}
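The subscribe/unsubscribe rewrite above replaces three relay-pool sub_ids and their handlers with a single listener Task that drains `streamIndefinitely`. Reduced to a self-contained sketch, the pattern looks like this (types and names here are illustrative; only the Task/AsyncStream shape mirrors the diff):

// Hedged sketch of the listener pattern: one cancellable Task consumes an AsyncStream;
// starting again first cancels the previous listener, and stopping cancels and clears it,
// which ends the for-await loop because AsyncStream finishes on task cancellation.
final class StreamingListenerSketch<Element: Sendable> {
    private var listener: Task<Void, Never>?

    func start(_ stream: AsyncStream<Element>, handle: @escaping @Sendable (Element) -> Void) {
        listener?.cancel()
        listener = Task {
            for await element in stream {
                handle(element)
            }
        }
    }

    func stop() {
        listener?.cancel()
        listener = nil
    }
}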
|
||||
|
||||
/// Adds an event to this thread.
|
||||
@@ -175,34 +168,25 @@ class ThreadModel: ObservableObject {
|
||||
///
|
||||
/// Marked as private because it is this class' responsibility to load events, not the view's; this keeps the interface simple.
|
||||
@MainActor
|
||||
private func handle_event(relay_id: RelayURL, ev: NostrConnectionEvent) {
|
||||
let (sub_id, done) = handle_subid_event(pool: damus_state.nostrNetwork.pool, relay_id: relay_id, ev: ev) { sid, ev in
|
||||
guard subids.contains(sid) else {
|
||||
return
|
||||
private func handle_event(ev: NostrEvent) {
|
||||
if ev.known_kind == .zap {
|
||||
process_zap_event(state: damus_state, ev: ev) { zap in
|
||||
|
||||
}
|
||||
|
||||
if ev.known_kind == .zap {
|
||||
process_zap_event(state: damus_state, ev: ev) { zap in
|
||||
|
||||
}
|
||||
} else if ev.is_textlike {
|
||||
// handle thread quote reposts, we just count them instead of
|
||||
// adding them to the thread
|
||||
if let target = ev.is_quote_repost, target == self.selected_event.id {
|
||||
//let _ = self.damus_state.quote_reposts.add_event(ev, target: target)
|
||||
} else {
|
||||
self.add_event(ev, keypair: damus_state.keypair)
|
||||
}
|
||||
} else if ev.is_textlike {
|
||||
// handle thread quote reposts, we just count them instead of
|
||||
// adding them to the thread
|
||||
if let target = ev.is_quote_repost, target == self.selected_event.id {
|
||||
//let _ = self.damus_state.quote_reposts.add_event(ev, target: target)
|
||||
} else {
|
||||
self.add_event(ev, keypair: damus_state.keypair)
|
||||
}
|
||||
}
|
||||
|
||||
guard done, let sub_id, subids.contains(sub_id) else {
|
||||
return
|
||||
else if ev.known_kind == .boost {
|
||||
damus_state.boosts.add_event(ev, target: original_event.id)
|
||||
}
|
||||
|
||||
if sub_id == self.base_subid {
|
||||
guard let txn = NdbTxn(ndb: damus_state.ndb) else { return }
|
||||
load_profiles(context: "thread", profiles_subid: self.profiles_subid, relay_id: relay_id, load: .from_events(Array(event_map.events)), damus_state: damus_state, txn: txn)
|
||||
else if ev.known_kind == .like {
|
||||
damus_state.likes.add_event(ev, target: original_event.id)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,12 +15,18 @@ struct ReplyQuoteView: View {
|
||||
@ObservedObject var thread: ThreadModel
|
||||
let options: EventViewOptions
|
||||
|
||||
@State var can_show_event = true
|
||||
|
||||
func update_should_show_event(event: NdbNote) async {
|
||||
self.can_show_event = await should_show_event(event: event, damus_state: state)
|
||||
}
|
||||
|
||||
func content(event: NdbNote) -> some View {
|
||||
ZStack(alignment: .leading) {
|
||||
VStack(alignment: .leading) {
|
||||
HStack(alignment: .center) {
|
||||
if should_show_event(event: event, damus_state: state) {
|
||||
ProfilePicView(pubkey: event.pubkey, size: 14, highlight: .reply, profiles: state.profiles, disable_animation: false)
|
||||
if can_show_event {
|
||||
ProfilePicView(pubkey: event.pubkey, size: 14, highlight: .reply, profiles: state.profiles, disable_animation: false, damusState: state)
|
||||
let blur_images = should_blur_images(settings: state.settings, contacts: state.contacts, ev: event, our_pubkey: state.pubkey)
|
||||
NoteContentView(damus_state: state, event: event, blur_images: blur_images, size: .small, options: options)
|
||||
.font(.callout)
|
||||
@@ -56,6 +62,9 @@ struct ReplyQuoteView: View {
|
||||
Group {
|
||||
if let event = state.events.lookup(event_id) {
|
||||
self.content(event: event)
|
||||
.onAppear {
|
||||
Task { await self.update_should_show_event(event: event) }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.