diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 5d16f25d5f0..78531e7f7a8 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -111,7 +111,7 @@ jobs: SLIDING_SYNC_PROXY_URL: "http://localhost:8118" - name: Upload to codecov.io - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: # Work around frequent upload errors, for runs inside the main repo (not PRs from forks). # Otherwise not required for public repos. diff --git a/.typos.toml b/.typos.toml index 425eff5d576..1165eecd437 100644 --- a/.typos.toml +++ b/.typos.toml @@ -22,6 +22,7 @@ sing = "sign" singed = "signed" singing = "signing" Nd = "Nd" +ratatui = "ratatui" [files] extend-exclude = [ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9ccb6390a1c..fff86ef2285 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -40,7 +40,7 @@ bisections, helping identifying which commit introduced a regression. A *good commit message* should be composed of: -- a hint to which area/feature is related by the commit +- a prefix to indicate which area/feature is related by the commit - a short description that would give sufficient context for a reviewer to guess what the commit is about. @@ -57,7 +57,8 @@ Examples of good commit messages: - “read_receipts: Store receipts locally, fixing #12345” A *proper PR title* would be a one-liner summary of the changes in the PR, following the -same guidelines of a good commit message. +same guidelines of a good commit message, including the area/feature prefix. Something like +`FFI: Allow logs files to be pruned.` would be a good PR title. (An additional bad example of a bad PR title would be `mynickname/branch name`, that is, just the branch name.) diff --git a/Cargo.lock b/Cargo.lock index 547f4e6ac6b..716a8444889 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -492,6 +492,7 @@ dependencies = [ "matrix-sdk-test", "pprof", "ruma", + "serde", "serde_json", "tempfile", "tokio", @@ -635,12 +636,27 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cassowary" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" + [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "castaway" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a17ed5635fc8536268e5d4de1e22e81ac34419e5f052d4d51f4e01dcc263fcc" +dependencies = [ + "rustversion", +] + [[package]] name = "cbc" version = "0.1.2" @@ -803,6 +819,33 @@ dependencies = [ "cc", ] +[[package]] +name = "color-eyre" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204" +dependencies = [ + "backtrace", + "color-spantrace", + "eyre", + "indenter", + "once_cell", + "owo-colors", + "tracing-error", +] + +[[package]] +name = "color-spantrace" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd6be1b2a7e382e2b98b43b2adcca6bb0e465af0bdd38123873ae61eb17a72c2" +dependencies = [ + "once_cell", + "owo-colors", + "tracing-core", + "tracing-error", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -815,6 +858,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "compact_str" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" +dependencies = [ + "castaway", + "cfg-if", + "itoa", + "ryu", + "static_assertions", +] + [[package]] name = "concurrent-queue" version = "2.4.0" @@ -986,6 +1042,31 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +[[package]] +name = "crossterm" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" +dependencies = [ + "bitflags 2.4.2", + "crossterm_winapi", + "libc", + "mio", + "parking_lot", + "signal-hook", + "signal-hook-mio", + "winapi", +] + +[[package]] +name = "crossterm_winapi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" +dependencies = [ + "winapi", +] + [[package]] name = "crunchy" version = "0.2.2" @@ -2693,6 +2774,15 @@ dependencies = [ "log", ] +[[package]] +name = "lru" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +dependencies = [ + "hashbrown 0.14.3", +] + [[package]] name = "mac" version = "0.1.1" @@ -3121,6 +3211,7 @@ dependencies = [ "stream_assert", "subtle", "thiserror", + "time", "tokio", "tokio-stream", "tracing", @@ -3442,15 +3533,37 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", + "log", "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.48.0", ] +[[package]] +name = "multiverse" +version = "0.1.0" +dependencies = [ + "anyhow", + "color-eyre", + "crossterm", + "futures-util", + "imbl", + "matrix-sdk", + "matrix-sdk-ui", + "ratatui", + "rpassword", + "serde_json", + "tokio", + "tracing", + "tracing-appender", + "tracing-subscriber", + "url", +] + [[package]] name = "native-tls" version = "0.2.11" @@ -3833,6 +3946,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "owo-colors" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" + [[package]] name = "p256" version = "0.13.2" @@ -4488,6 +4607,26 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "ratatui" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcb12f8fbf6c62614b0d56eb352af54f6a22410c3b079eb53ee93c7b97dd31d8" +dependencies = [ + "bitflags 2.4.2", + "cassowary", + "compact_str", + "crossterm", + "indoc", + "itertools 0.12.1", + "lru", + "paste", + "stability", + "strum", + "unicode-segmentation", + "unicode-width", +] + [[package]] name = "rayon" version = "1.8.1" @@ -4692,19 +4831,14 @@ dependencies = [ ] [[package]] -name = "rrrepl" -version = "0.1.0" +name = "rpassword" +version = "7.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f" dependencies = [ - "anyhow", - "futures-util", - "matrix-sdk", - "matrix-sdk-ui", - "serde_json", - "tokio", - "tracing", - "tracing-appender", - "tracing-subscriber", - "url", + "libc", + "rtoolbox", + "windows-sys 0.48.0", ] [[package]] @@ -4727,10 +4861,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rtoolbox" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "ruma" version = "0.9.4" -source = "git+https://github.com/ruma/ruma?rev=68c9bb0930f2195fa8672fbef9633ef62737df5d#68c9bb0930f2195fa8672fbef9633ef62737df5d" +source = "git+https://github.com/ruma/ruma?rev=b2542df2bbbdf09af0612c9f28bcfa5620e1911c#b2542df2bbbdf09af0612c9f28bcfa5620e1911c" dependencies = [ "assign", "js_int", @@ -4746,7 +4890,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=68c9bb0930f2195fa8672fbef9633ef62737df5d#68c9bb0930f2195fa8672fbef9633ef62737df5d" +source = "git+https://github.com/ruma/ruma?rev=b2542df2bbbdf09af0612c9f28bcfa5620e1911c#b2542df2bbbdf09af0612c9f28bcfa5620e1911c" dependencies = [ "as_variant", "assign", @@ -4765,7 +4909,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.12.1" -source = "git+https://github.com/ruma/ruma?rev=68c9bb0930f2195fa8672fbef9633ef62737df5d#68c9bb0930f2195fa8672fbef9633ef62737df5d" +source = "git+https://github.com/ruma/ruma?rev=b2542df2bbbdf09af0612c9f28bcfa5620e1911c#b2542df2bbbdf09af0612c9f28bcfa5620e1911c" dependencies = [ "as_variant", "base64 0.21.7", @@ -4795,7 +4939,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.27.11" -source = "git+https://github.com/ruma/ruma?rev=68c9bb0930f2195fa8672fbef9633ef62737df5d#68c9bb0930f2195fa8672fbef9633ef62737df5d" +source = "git+https://github.com/ruma/ruma?rev=b2542df2bbbdf09af0612c9f28bcfa5620e1911c#b2542df2bbbdf09af0612c9f28bcfa5620e1911c" dependencies = [ "as_variant", "indexmap 2.2.2", @@ -4819,7 +4963,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=68c9bb0930f2195fa8672fbef9633ef62737df5d#68c9bb0930f2195fa8672fbef9633ef62737df5d" +source = "git+https://github.com/ruma/ruma?rev=b2542df2bbbdf09af0612c9f28bcfa5620e1911c#b2542df2bbbdf09af0612c9f28bcfa5620e1911c" dependencies = [ "js_int", "ruma-common", @@ -4831,7 +4975,7 @@ dependencies = [ [[package]] name = "ruma-html" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=68c9bb0930f2195fa8672fbef9633ef62737df5d#68c9bb0930f2195fa8672fbef9633ef62737df5d" +source = "git+https://github.com/ruma/ruma?rev=b2542df2bbbdf09af0612c9f28bcfa5620e1911c#b2542df2bbbdf09af0612c9f28bcfa5620e1911c" dependencies = [ "as_variant", "html5ever", @@ -4843,7 +4987,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.3" -source = "git+https://github.com/ruma/ruma?rev=68c9bb0930f2195fa8672fbef9633ef62737df5d#68c9bb0930f2195fa8672fbef9633ef62737df5d" +source = "git+https://github.com/ruma/ruma?rev=b2542df2bbbdf09af0612c9f28bcfa5620e1911c#b2542df2bbbdf09af0612c9f28bcfa5620e1911c" dependencies = [ "js_int", "thiserror", @@ -4852,7 +4996,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.12.0" -source = 
"git+https://github.com/ruma/ruma?rev=68c9bb0930f2195fa8672fbef9633ef62737df5d#68c9bb0930f2195fa8672fbef9633ef62737df5d" +source = "git+https://github.com/ruma/ruma?rev=b2542df2bbbdf09af0612c9f28bcfa5620e1911c#b2542df2bbbdf09af0612c9f28bcfa5620e1911c" dependencies = [ "once_cell", "proc-macro-crate 2.0.2", @@ -4867,7 +5011,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=68c9bb0930f2195fa8672fbef9633ef62737df5d#68c9bb0930f2195fa8672fbef9633ef62737df5d" +source = "git+https://github.com/ruma/ruma?rev=b2542df2bbbdf09af0612c9f28bcfa5620e1911c#b2542df2bbbdf09af0612c9f28bcfa5620e1911c" dependencies = [ "js_int", "ruma-common", @@ -5299,6 +5443,36 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "signal-hook" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-mio" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af" +dependencies = [ + "libc", + "mio", + "signal-hook", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + [[package]] name = "signature" version = "2.2.0" @@ -5377,6 +5551,16 @@ dependencies = [ "der", ] +[[package]] +name = "stability" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebd1b177894da2a2d9120208c3386066af06a488255caabc5de8ddca22dbc3ce" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -5459,6 +5643,28 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "strum" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.48", +] + [[package]] name = "subtle" version = "2.5.0" @@ -5926,6 +6132,16 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-error" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +dependencies = [ + "tracing", + "tracing-subscriber", +] + [[package]] name = "tracing-log" version = "0.2.0" @@ -6053,6 +6269,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" + [[package]] name = "unicode-width" version = "0.1.11" @@ -6067,9 +6289,8 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "uniffi" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad0be8bba6c242d2d16922de4a9c8f167b9491729fda552e70f8626bf7302cb" +version = 
"0.26.0" +source = "git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "anyhow", "camino", @@ -6089,9 +6310,8 @@ dependencies = [ [[package]] name = "uniffi_bindgen" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab31006ab9c9c6870739f0e74235729d1478d82e73571b8f53c25aa176d67535" +version = "0.26.0" +source = "git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "anyhow", "askama", @@ -6114,9 +6334,8 @@ dependencies = [ [[package]] name = "uniffi_build" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aa3a7608c6872dc1ce53199d816a24d2e19af952d82ce557ecc8692a4ae9cba" +version = "0.26.0" +source = "git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "anyhow", "camino", @@ -6125,9 +6344,8 @@ dependencies = [ [[package]] name = "uniffi_checksum_derive" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72775b3afa6adb30e0c92b3107858d2fcb0ff1a417ac242db1f648b0e2dd0ef2" +version = "0.26.0" +source = "git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "quote", "syn 2.0.48", @@ -6135,9 +6353,8 @@ dependencies = [ [[package]] name = "uniffi_core" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d6e8db3f4e558faf0e25ac4b5bd775567973a4e18809f1123e74de52a853692" +version = "0.26.0" +source = "git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "anyhow", "async-compat", @@ -6152,9 +6369,8 @@ dependencies = [ [[package]] name = "uniffi_macros" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a126650799f97d97d8e38e3f10f15c65f5bc5a76b021bec21823efe9dd831a02" +version = "0.26.0" +source = "git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "bincode", "camino", @@ -6171,9 +6387,8 @@ dependencies = [ [[package]] name = "uniffi_meta" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f64a99e905671738d9d293f9cce58708ce1af8e13ea29f9d6b6925114fc2e85" +version = "0.26.0" +source = "git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "anyhow", "bytes", @@ -6183,9 +6398,8 @@ dependencies = [ [[package]] name = "uniffi_testing" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdca5719a22edf34c8239cc6ac9e3906d7ebc2a3e8a5e6ece4c3dffc312a4251" +version = "0.26.0" +source = "git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "anyhow", "camino", @@ -6196,9 +6410,8 @@ dependencies = [ [[package]] name = "uniffi_udl" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6817c15714acccd0d0459f99b524cabebfdd622376464a2c6466a6485bdb4b" +version = "0.26.0" +source = 
"git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "anyhow", "textwrap", @@ -6503,8 +6716,7 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "weedle2" version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "998d2c24ec099a87daf9467808859f9d82b61f1d9c9701251aea037f514eae0e" +source = "git+https://github.com/mozilla/uniffi-rs?rev=0a5e2eb5760b4ce5549021ec91de546716de8db1#0a5e2eb5760b4ce5549021ec91de546716de8db1" dependencies = [ "nom", ] diff --git a/Cargo.toml b/Cargo.toml index 46fa37d0a55..a17bb12190e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,9 +35,10 @@ futures-core = "0.3.28" futures-executor = "0.3.21" futures-util = { version = "0.3.26", default-features = false, features = ["alloc"] } http = "0.2.6" +imbl = "2.0.0" itertools = "0.12.0" -ruma = { git = "https://github.com/ruma/ruma", rev = "68c9bb0930f2195fa8672fbef9633ef62737df5d", features = ["client-api-c", "compat-upload-signatures", "compat-user-id", "compat-arbitrary-length-ids", "unstable-msc3401"] } -ruma-common = { git = "https://github.com/ruma/ruma", rev = "68c9bb0930f2195fa8672fbef9633ef62737df5d"} +ruma = { git = "https://github.com/ruma/ruma", rev = "b2542df2bbbdf09af0612c9f28bcfa5620e1911c", features = ["client-api-c", "compat-upload-signatures", "compat-user-id", "compat-arbitrary-length-ids", "compat-tag-info", "unstable-msc3401"] } +ruma-common = { git = "https://github.com/ruma/ruma", rev = "b2542df2bbbdf09af0612c9f28bcfa5620e1911c" } once_cell = "1.16.0" rand = "0.8.5" serde = "1.0.151" @@ -50,8 +51,8 @@ tokio = { version = "1.30.0", default-features = false, features = ["sync"] } tokio-stream = "0.1.14" tracing = { version = "0.1.40", default-features = false, features = ["std"] } tracing-core = "0.1.32" -uniffi = "0.26.1" -uniffi_bindgen = "0.26.1" +uniffi = { git = "https://github.com/mozilla/uniffi-rs", rev = "0a5e2eb5760b4ce5549021ec91de546716de8db1" } +uniffi_bindgen = { git = "https://github.com/mozilla/uniffi-rs", rev = "0a5e2eb5760b4ce5549021ec91de546716de8db1" } vodozemac = "0.5.1" zeroize = "1.6.0" diff --git a/benchmarks/Cargo.toml b/benchmarks/Cargo.toml index 6ba8bc2c7d3..203949869fb 100644 --- a/benchmarks/Cargo.toml +++ b/benchmarks/Cargo.toml @@ -15,6 +15,7 @@ matrix-sdk-sqlite = { workspace = true, features = ["crypto-store"] } matrix-sdk-test = { workspace = true } matrix-sdk = { workspace = true } ruma = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } tempfile = "3.3.0" tokio = { version = "1.24.2", default-features = false, features = ["rt-multi-thread"] } @@ -29,3 +30,7 @@ harness = false [[bench]] name = "store_bench" harness = false + +[[bench]] +name = "room_bench" +harness = false diff --git a/benchmarks/benches/crypto_bench.rs b/benchmarks/benches/crypto_bench.rs index 75aa4094fd5..b46a70b907a 100644 --- a/benchmarks/benches/crypto_bench.rs +++ b/benchmarks/benches/crypto_bench.rs @@ -194,7 +194,7 @@ pub fn room_key_sharing(c: &mut Criterion) { machine.mark_request_as_sent(&request.txn_id, &to_device_response).await.unwrap(); } - machine.invalidate_group_session(room_id).await.unwrap(); + machine.discard_room_key(room_id).await.unwrap(); }) }); @@ -225,7 +225,7 @@ pub fn room_key_sharing(c: &mut Criterion) { machine.mark_request_as_sent(&request.txn_id, &to_device_response).await.unwrap(); } - machine.invalidate_group_session(room_id).await.unwrap(); + 
machine.discard_room_key(room_id).await.unwrap(); }) }); diff --git a/benchmarks/benches/room_bench.rs b/benchmarks/benches/room_bench.rs new file mode 100644 index 00000000000..40f0e633a9c --- /dev/null +++ b/benchmarks/benches/room_bench.rs @@ -0,0 +1,114 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use matrix_sdk::utils::IntoRawStateEventContent; +use matrix_sdk_base::{ + store::StoreConfig, BaseClient, RoomInfo, RoomState, SessionMeta, StateChanges, StateStore, +}; +use matrix_sdk_sqlite::SqliteStateStore; +use matrix_sdk_test::EventBuilder; +use ruma::{ + api::client::membership::get_member_events, + device_id, + events::room::member::{RoomMemberEvent, RoomMemberEventContent}, + owned_room_id, + serde::Raw, + user_id, OwnedUserId, +}; +use serde_json::json; +use tokio::runtime::Builder; + +pub fn receive_all_members_benchmark(c: &mut Criterion) { + const MEMBERS_IN_ROOM: usize = 100000; + + let runtime = Builder::new_multi_thread().build().expect("Can't create runtime"); + let room_id = owned_room_id!("!room:example.com"); + + let ev_builder = EventBuilder::new(); + let mut member_events: Vec<Raw<RoomMemberEvent>> = Vec::with_capacity(MEMBERS_IN_ROOM); + let member_content_json = json!({ + "avatar_url": "mxc://example.org/SEsfnsuifSDFSSEF", + "displayname": "Alice Margatroid", + "membership": "join", + "reason": "Looking for support", + }); + let member_content: Raw<RoomMemberEventContent> = + member_content_json.into_raw_state_event_content().cast(); + for i in 0..MEMBERS_IN_ROOM { + let user_id = OwnedUserId::try_from(format!("@user_{}:matrix.org", i)).unwrap(); + let state_key = user_id.to_string(); + let event: Raw<RoomMemberEvent> = ev_builder + .make_state_event( + &user_id, + &room_id, + &state_key, + member_content.deserialize().unwrap(), + None, + ) + .cast(); + member_events.push(event); + } + + // Create a fake list of changes, and a session to recover from.
+ let mut changes = StateChanges::default(); + changes.add_room(RoomInfo::new(&room_id, RoomState::Joined)); + for member_event in member_events.iter() { + let event = member_event.clone().cast(); + changes.add_state_event(&room_id, event.deserialize().unwrap(), event); + } + + // Sqlite + let sqlite_dir = tempfile::tempdir().unwrap(); + let sqlite_store = runtime.block_on(SqliteStateStore::open(sqlite_dir.path(), None)).unwrap(); + runtime + .block_on(sqlite_store.save_changes(&changes)) + .expect("initial filling of sqlite failed"); + + let base_client = BaseClient::with_store_config(StoreConfig::new().state_store(sqlite_store)); + runtime + .block_on(base_client.set_session_meta(SessionMeta { + user_id: user_id!("@somebody:example.com").to_owned(), + device_id: device_id!("DEVICE_ID").to_owned(), + })) + .expect("Could not set session meta"); + base_client.get_or_create_room(&room_id, RoomState::Joined); + + let request = get_member_events::v3::Request::new(room_id.clone()); + let response = get_member_events::v3::Response::new(member_events); + + let count = MEMBERS_IN_ROOM; + let name = format!("{count} members"); + let mut group = c.benchmark_group("Test"); + group.throughput(Throughput::Elements(count as u64)); + group.sample_size(50); + + group.bench_function(BenchmarkId::new("receive_members", name), |b| { + b.to_async(&runtime).iter(|| async { + base_client.receive_all_members(&room_id, &request, &response).await.unwrap(); + }); + }); + + { + let _guard = runtime.enter(); + drop(base_client); + } + + group.finish(); +} + +fn criterion() -> Criterion { + #[cfg(target_os = "linux")] + let criterion = Criterion::default().with_profiler(pprof::criterion::PProfProfiler::new( + 100, + pprof::criterion::Output::Flamegraph(None), + )); + #[cfg(not(target_os = "linux"))] + let criterion = Criterion::default(); + + criterion +} + +criterion_group! 
{ + name = room; + config = criterion(); + targets = receive_all_members_benchmark, +} +criterion_main!(room); diff --git a/bindings/apple/Tests/MatrixRustSDKTests/ClientTests.swift b/bindings/apple/Tests/MatrixRustSDKTests/ClientTests.swift index 8285b2fd64f..17a9cd24a69 100644 --- a/bindings/apple/Tests/MatrixRustSDKTests/ClientTests.swift +++ b/bindings/apple/Tests/MatrixRustSDKTests/ClientTests.swift @@ -23,16 +23,6 @@ final class ClientTests: XCTestCase { } } - func testBuildingWithUsername() { - do { - _ = try ClientBuilder() - .username(username: "@test:matrix.org") - .build() - } catch { - XCTFail("The client should build successfully when given a username.") - } - } - func testBuildingWithInvalidUsername() { do { _ = try ClientBuilder() @@ -40,7 +30,7 @@ final class ClientTests: XCTestCase { .build() XCTFail("The client should not build when given an invalid username.") - } catch ClientError.Generic(let message) { + } catch ClientBuildError.Sdk(let message) { XCTAssertTrue(message.contains(".well-known"), "The client should fail to do the well-known lookup.") } catch { XCTFail("Not expecting any other kind of exception") diff --git a/bindings/matrix-sdk-crypto-ffi/src/lib.rs b/bindings/matrix-sdk-crypto-ffi/src/lib.rs index 62d3d8b8fef..c8875b0fc0f 100644 --- a/bindings/matrix-sdk-crypto-ffi/src/lib.rs +++ b/bindings/matrix-sdk-crypto-ffi/src/lib.rs @@ -1014,7 +1014,8 @@ mod tests { "JGgPQRuYj3ScMdPS+A0P+k/1qS9Hr3qeKXLscI+hS78" ); - let room_keys = machine.runtime.block_on(machine.inner.export_room_keys(|_| true))?; + let room_keys = + machine.runtime.block_on(machine.inner.store().export_room_keys(|_| true))?; assert_eq!(room_keys.len(), 2); let cross_signing_status = machine.cross_signing_status(); diff --git a/bindings/matrix-sdk-crypto-ffi/src/machine.rs b/bindings/matrix-sdk-crypto-ffi/src/machine.rs index 4e4a772c25e..f9f17dd9e03 100644 --- a/bindings/matrix-sdk-crypto-ffi/src/machine.rs +++ b/bindings/matrix-sdk-crypto-ffi/src/machine.rs @@ -965,7 +965,7 @@ impl OlmMachine { passphrase: String, rounds: i32, ) -> Result { - let keys = self.runtime.block_on(self.inner.export_room_keys(|_| true))?; + let keys = self.runtime.block_on(self.inner.store().export_room_keys(|_| true))?; let encrypted = encrypt_room_key_export(&keys, &passphrase, rounds as u32) .map_err(CryptoStoreError::Serialization)?; @@ -1024,7 +1024,7 @@ impl OlmMachine { pub fn discard_room_key(&self, room_id: String) -> Result<(), CryptoStoreError> { let room_id = RoomId::parse(room_id)?; - self.runtime.block_on(self.inner.invalidate_group_session(&room_id))?; + self.runtime.block_on(self.inner.discard_room_key(&room_id))?; Ok(()) } diff --git a/bindings/matrix-sdk-ffi/Cargo.toml b/bindings/matrix-sdk-ffi/Cargo.toml index 4ca3307e2e4..502cf34077e 100644 --- a/bindings/matrix-sdk-ffi/Cargo.toml +++ b/bindings/matrix-sdk-ffi/Cargo.toml @@ -29,7 +29,7 @@ eyeball-im = { workspace = true } extension-trait = "1.0.1" futures-core = { workspace = true } futures-util = { workspace = true } -matrix-sdk-ui = { workspace = true, features = ["e2e-encryption", "uniffi"] } +matrix-sdk-ui = { workspace = true, features = ["e2e-encryption", "uniffi", "experimental-room-list-with-unified-invites"] } mime = "0.3.16" once_cell = { workspace = true } opentelemetry = "0.21.0" diff --git a/bindings/matrix-sdk-ffi/src/authentication_service.rs b/bindings/matrix-sdk-ffi/src/authentication_service.rs index 8ed863d5fab..4a5125432fc 100644 --- a/bindings/matrix-sdk-ffi/src/authentication_service.rs +++ 
b/bindings/matrix-sdk-ffi/src/authentication_service.rs @@ -16,10 +16,13 @@ use matrix_sdk::{ }, AuthorizationResponse, Oidc, OidcError, }, - AuthSession, + AuthSession, ClientBuildError as MatrixClientBuildError, HttpError, RumaApiError, }; use ruma::{ - api::client::discovery::discover_homeserver::AuthenticationServerInfo, IdParseError, + api::{ + client::discovery::discover_homeserver::AuthenticationServerInfo, + error::{DeserializationError, FromHttpResponseError}, + }, OwnedUserId, }; use url::Url; @@ -28,7 +31,7 @@ use zeroize::Zeroize; use super::{client::Client, client_builder::ClientBuilder, RUNTIME}; use crate::{ client::ClientSessionDelegate, - client_builder::{CertificateBytes, UrlScheme}, + client_builder::{CertificateBytes, ClientBuildError}, error::ClientError, }; @@ -44,6 +47,7 @@ pub struct AuthenticationService { cross_process_refresh_lock_id: Option, session_delegate: Option>, additional_root_certificates: Vec, + proxy: Option, } impl Drop for AuthenticationService { @@ -57,14 +61,23 @@ impl Drop for AuthenticationService { pub enum AuthenticationError { #[error("A successful call to configure_homeserver must be made first.")] ClientMissing, - #[error("{message}")] - InvalidServerName { message: String }, + + #[error("The supplied server name is invalid.")] + InvalidServerName, + #[error(transparent)] + ServerUnreachable(HttpError), + #[error(transparent)] + WellKnownLookupFailed(RumaApiError), + #[error(transparent)] + WellKnownDeserializationError(DeserializationError), #[error("The homeserver doesn't provide a trusted sliding sync proxy in its well-known configuration.")] SlidingSyncNotAvailable, + #[error("Login was successful but is missing a valid Session to configure the file store.")] SessionMissing, #[error("Failed to use the supplied base path.")] InvalidBasePath, + #[error( "The homeserver doesn't provide an authentication issuer in its well-known configuration." 
)] @@ -79,6 +92,7 @@ pub enum AuthenticationError { OidcCancelled, #[error("An error occurred with OIDC: {message}")] OidcError { message: String }, + #[error("An error occurred: {message}")] Generic { message: String }, } @@ -89,9 +103,27 @@ impl From for AuthenticationError { } } -impl From for AuthenticationError { - fn from(e: IdParseError) -> AuthenticationError { - AuthenticationError::InvalidServerName { message: e.to_string() } +impl From for AuthenticationError { + fn from(e: ClientBuildError) -> AuthenticationError { + match e { + ClientBuildError::Sdk(MatrixClientBuildError::InvalidServerName) => { + AuthenticationError::InvalidServerName + } + + ClientBuildError::Sdk(MatrixClientBuildError::Http(e)) => { + AuthenticationError::ServerUnreachable(e) + } + + ClientBuildError::Sdk(MatrixClientBuildError::AutoDiscovery( + FromHttpResponseError::Server(e), + )) => AuthenticationError::WellKnownLookupFailed(e), + + ClientBuildError::Sdk(MatrixClientBuildError::AutoDiscovery( + FromHttpResponseError::Deserialization(e), + )) => AuthenticationError::WellKnownDeserializationError(e), + + _ => AuthenticationError::Generic { message: e.to_string() }, + } } } @@ -190,6 +222,7 @@ impl AuthenticationService { passphrase: Option, user_agent: Option, additional_root_certificates: Vec>, + proxy: Option, oidc_configuration: Option, custom_sliding_sync_proxy: Option, session_delegate: Option>, @@ -206,6 +239,7 @@ impl AuthenticationService { session_delegate: session_delegate.map(Into::into), cross_process_refresh_lock_id, additional_root_certificates, + proxy, }) } @@ -220,47 +254,12 @@ impl AuthenticationService { server_name_or_homeserver_url: String, ) -> Result<(), AuthenticationError> { let mut builder = self.new_client_builder(); + builder = builder.server_name_or_homeserver_url(server_name_or_homeserver_url); - // Attempt discovery as a server name first. - let result = matrix_sdk::sanitize_server_name(&server_name_or_homeserver_url); - - match result { - Ok(server_name) => { - let protocol = if server_name_or_homeserver_url.starts_with("http://") { - UrlScheme::Http - } else { - UrlScheme::Https - }; - builder = builder.server_name_with_protocol(server_name.to_string(), protocol); - } - - Err(e) => { - // When the input isn't a valid server name check it is a URL. - // If this is the case, build the client with a homeserver URL. - if Url::parse(&server_name_or_homeserver_url).is_ok() { - builder = builder.homeserver_url(server_name_or_homeserver_url.clone()); - } else { - return Err(e.into()); - } - } - } - - let client = builder.build_inner().or_else(|e| { - if !server_name_or_homeserver_url.starts_with("http://") - && !server_name_or_homeserver_url.starts_with("https://") - { - return Err(e); - } - // When discovery fails, fallback to the homeserver URL if supplied. - let mut builder = self.new_client_builder(); - builder = builder.homeserver_url(server_name_or_homeserver_url); - builder.build_inner() - })?; - + let client = builder.build_inner()?; let details = RUNTIME.block_on(self.details_from_client(&client))?; - // Now we've verified that it's a valid homeserver, make sure - // there's a sliding sync proxy available one way or another. + // Make sure there's a sliding sync proxy available. 
if self.custom_sliding_sync_proxy.read().unwrap().is_none() && client.discovered_sliding_sync_proxy().is_none() { @@ -396,6 +395,10 @@ impl AuthenticationService { builder = builder.user_agent(user_agent); } + if let Some(proxy) = &self.proxy { + builder = builder.proxy(proxy.to_owned()) + } + builder = builder.add_root_certificates(self.additional_root_certificates.clone()); builder @@ -612,6 +615,10 @@ impl AuthenticationService { }) .username(user_id.to_string()); + if let Some(proxy) = &self.proxy { + client = client.proxy(proxy.to_owned()) + } + if let Some(id) = &self.cross_process_refresh_lock_id { let Some(ref session_delegate) = self.session_delegate else { return Err(AuthenticationError::OidcError { diff --git a/bindings/matrix-sdk-ffi/src/client.rs b/bindings/matrix-sdk-ffi/src/client.rs index 2545126da9b..54406134ef0 100644 --- a/bindings/matrix-sdk-ffi/src/client.rs +++ b/bindings/matrix-sdk-ffi/src/client.rs @@ -40,7 +40,10 @@ use matrix_sdk_ui::notification_client::NotificationProcessSetup as MatrixNotifi use mime::Mime; use ruma::{ api::client::discovery::discover_homeserver::AuthenticationServerInfo, - events::room::power_levels::RoomPowerLevelsEventContent, + events::{ + ignored_user_list::IgnoredUserListEventContent, + room::power_levels::RoomPowerLevelsEventContent, GlobalAccountDataEventType, + }, push::{HttpPusherData as RumaHttpPusherData, PushFormat as RumaPushFormat}, }; use serde::{Deserialize, Serialize}; @@ -645,22 +648,6 @@ impl Client { Ok(dm) } - pub fn ignore_user(&self, user_id: String) -> Result<(), ClientError> { - RUNTIME.block_on(async move { - let user_id = UserId::parse(user_id)?; - self.inner.account().ignore_user(&user_id).await?; - Ok(()) - }) - } - - pub fn unignore_user(&self, user_id: String) -> Result<(), ClientError> { - RUNTIME.block_on(async move { - let user_id = UserId::parse(user_id)?; - self.inner.account().unignore_user(&user_id).await?; - Ok(()) - }) - } - pub fn search_users( &self, search_term: String, @@ -710,6 +697,54 @@ impl Client { pub fn encryption(&self) -> Arc { Arc::new(self.inner.encryption().into()) } + + // Ignored users + + pub async fn ignored_users(&self) -> Result, ClientError> { + if let Some(raw_content) = self + .inner + .account() + .fetch_account_data(GlobalAccountDataEventType::IgnoredUserList) + .await? 
+ { + let content = raw_content.deserialize_as::()?; + let user_ids: Vec = + content.ignored_users.keys().map(|id| id.to_string()).collect(); + + return Ok(user_ids); + } + + Ok(vec![]) + } + + pub async fn ignore_user(&self, user_id: String) -> Result<(), ClientError> { + let user_id = UserId::parse(user_id)?; + self.inner.account().ignore_user(&user_id).await?; + Ok(()) + } + + pub async fn unignore_user(&self, user_id: String) -> Result<(), ClientError> { + let user_id = UserId::parse(user_id)?; + self.inner.account().unignore_user(&user_id).await?; + Ok(()) + } + + pub fn subscribe_to_ignored_users( + &self, + listener: Box, + ) -> Arc { + let mut subscriber = self.inner.subscribe_to_ignore_user_list_changes(); + Arc::new(TaskHandle::new(RUNTIME.spawn(async move { + while let Some(user_ids) = subscriber.next().await { + listener.call(user_ids); + } + }))) + } +} + +#[uniffi::export(callback_interface)] +pub trait IgnoredUsersListener: Sync + Send { + fn call(&self, ignored_user_ids: Vec); } #[derive(uniffi::Enum)] @@ -766,6 +801,7 @@ impl From<&search_users::v3::User> for UserProfile { impl Client { fn process_session_change(&self, session_change: SessionChange) { if let Some(delegate) = self.delegate.read().unwrap().clone() { + debug!("Applying session change: {session_change:?}"); RUNTIME.spawn_blocking(move || match session_change { SessionChange::UnknownToken { soft_logout } => { delegate.did_receive_auth_error(soft_logout); @@ -774,6 +810,10 @@ impl Client { delegate.did_refresh_tokens(); } }); + } else { + debug!( + "No client delegate found, session change couldn't be applied: {session_change:?}" + ); } } diff --git a/bindings/matrix-sdk-ffi/src/client_builder.rs b/bindings/matrix-sdk-ffi/src/client_builder.rs index 2e4daad89ac..a26c683b9be 100644 --- a/bindings/matrix-sdk-ffi/src/client_builder.rs +++ b/bindings/matrix-sdk-ffi/src/client_builder.rs @@ -7,7 +7,8 @@ use matrix_sdk::{ api::{error::UnknownVersionError, MatrixVersion}, ServerName, UserId, }, - Client as MatrixClient, ClientBuilder as MatrixClientBuilder, + Client as MatrixClient, ClientBuildError as MatrixClientBuildError, + ClientBuilder as MatrixClientBuilder, IdParseError, }; use sanitize_filename_reader_friendly::sanitize; use url::Url; @@ -19,18 +20,51 @@ use crate::{client::ClientSessionDelegate, error::ClientError, helpers::unwrap_o /// A list of bytes containing a certificate in DER or PEM form. 
pub type CertificateBytes = Vec; -#[derive(Clone)] -pub(crate) enum UrlScheme { - Http, - Https, +#[derive(Debug, Clone)] +enum HomeserverConfig { + Url(String), + ServerName(String), + ServerNameOrUrl(String), +} + +#[derive(Debug, thiserror::Error, uniffi::Error)] +#[uniffi(flat_error)] +pub enum ClientBuildError { + #[error(transparent)] + Sdk(#[from] MatrixClientBuildError), + #[error("Failed to build the client: {message}")] + Generic { message: String }, +} + +impl From for ClientBuildError { + fn from(e: IdParseError) -> ClientBuildError { + ClientBuildError::Generic { message: format!("{e:#}") } + } +} + +impl From for ClientBuildError { + fn from(e: std::io::Error) -> ClientBuildError { + ClientBuildError::Generic { message: format!("{e:#}") } + } +} + +impl From for ClientBuildError { + fn from(e: url::ParseError) -> ClientBuildError { + ClientBuildError::Generic { message: format!("{e:#}") } + } +} + +impl From for ClientBuildError { + fn from(e: ClientError) -> ClientBuildError { + ClientBuildError::Generic { message: format!("{e:#}") } + } } #[derive(Clone, uniffi::Object)] pub struct ClientBuilder { base_path: Option, username: Option, - server_name: Option<(String, UrlScheme)>, - homeserver_url: Option, + homeserver_cfg: Option, server_versions: Option>, passphrase: Zeroizing>, user_agent: Option, @@ -51,8 +85,7 @@ impl ClientBuilder { Arc::new(Self { base_path: None, username: None, - server_name: None, - homeserver_url: None, + homeserver_cfg: None, server_versions: None, passphrase: Zeroizing::new(None), user_agent: None, @@ -108,14 +141,19 @@ impl ClientBuilder { pub fn server_name(self: Arc, server_name: String) -> Arc { let mut builder = unwrap_or_clone_arc(self); - // Assume HTTPS if no protocol is provided. - builder.server_name = Some((server_name, UrlScheme::Https)); + builder.homeserver_cfg = Some(HomeserverConfig::ServerName(server_name)); Arc::new(builder) } pub fn homeserver_url(self: Arc, url: String) -> Arc { let mut builder = unwrap_or_clone_arc(self); - builder.homeserver_url = Some(url); + builder.homeserver_cfg = Some(HomeserverConfig::Url(url)); + Arc::new(builder) + } + + pub fn server_name_or_homeserver_url(self: Arc, server_name_or_url: String) -> Arc { + let mut builder = unwrap_or_clone_arc(self); + builder.homeserver_cfg = Some(HomeserverConfig::ServerNameOrUrl(server_name_or_url)); Arc::new(builder) } @@ -165,7 +203,7 @@ impl ClientBuilder { Arc::new(builder) } - pub fn build(self: Arc) -> Result, ClientError> { + pub fn build(self: Arc) -> Result, ClientBuildError> { Ok(Arc::new(self.build_inner()?)) } } @@ -200,17 +238,7 @@ impl ClientBuilder { Arc::new(builder) } - pub(crate) fn server_name_with_protocol( - self: Arc, - server_name: String, - protocol: UrlScheme, - ) -> Arc { - let mut builder = unwrap_or_clone_arc(self); - builder.server_name = Some((server_name, protocol)); - Arc::new(builder) - } - - pub(crate) fn build_inner(self: Arc) -> anyhow::Result { + pub(crate) fn build_inner(self: Arc) -> Result { let builder = unwrap_or_clone_arc(self); let mut inner_builder = builder.inner; @@ -223,22 +251,26 @@ impl ClientBuilder { } // Determine server either from URL, server name or user ID. 
- if let Some(homeserver_url) = builder.homeserver_url { - inner_builder = inner_builder.homeserver_url(homeserver_url); - } else if let Some((server_name, protocol)) = builder.server_name { - let server_name = ServerName::parse(server_name)?; - inner_builder = match protocol { - UrlScheme::Http => inner_builder.insecure_server_name_no_tls(&server_name), - UrlScheme::Https => inner_builder.server_name(&server_name), - }; - } else if let Some(username) = builder.username { - let user = UserId::parse(username)?; - inner_builder = inner_builder.server_name(user.server_name()); - } else { - anyhow::bail!( - "Failed to build: One of homeserver_url, server_name or username must be called." - ); - } + inner_builder = match builder.homeserver_cfg { + Some(HomeserverConfig::Url(url)) => inner_builder.homeserver_url(url), + Some(HomeserverConfig::ServerName(server_name)) => { + let server_name = ServerName::parse(server_name)?; + inner_builder.server_name(&server_name) + } + Some(HomeserverConfig::ServerNameOrUrl(server_name_or_url)) => { + inner_builder.server_name_or_homeserver_url(server_name_or_url) + } + None => { + if let Some(username) = builder.username { + let user = UserId::parse(username)?; + inner_builder.server_name(user.server_name()) + } else { + return Err(ClientBuildError::Generic { + message: "Failed to build: One of homeserver_url, server_name, server_name_or_homeserver_url or username must be called.".to_owned(), + }); + } + } + }; let mut certificates = Vec::new(); @@ -248,8 +280,10 @@ impl ClientBuilder { if let Ok(cert) = Certificate::from_der(&certificate) { certificates.push(cert); } else { - let cert = Certificate::from_pem(&certificate) - .map_err(|e| anyhow::anyhow!("Failed to add a root certificate {e:?}"))?; + let cert = + Certificate::from_pem(&certificate).map_err(|e| ClientBuildError::Generic { + message: format!("Failed to add a root certificate {e:?}"), + })?; certificates.push(cert); } } @@ -277,7 +311,8 @@ impl ClientBuilder { server_versions .iter() .map(|s| MatrixVersion::try_from(s.as_str())) - .collect::, UnknownVersionError>>()?, + .collect::, UnknownVersionError>>() + .map_err(|e| ClientBuildError::Generic { message: e.to_string() })?, ); } diff --git a/bindings/matrix-sdk-ffi/src/encryption.rs b/bindings/matrix-sdk-ffi/src/encryption.rs index 2fd59c53f48..6fd1ec59ffc 100644 --- a/bindings/matrix-sdk-ffi/src/encryption.rs +++ b/bindings/matrix-sdk-ffi/src/encryption.rs @@ -1,7 +1,10 @@ use std::sync::Arc; use futures_util::StreamExt; -use matrix_sdk::encryption::{backups, recovery}; +use matrix_sdk::{ + encryption, + encryption::{backups, recovery}, +}; use thiserror::Error; use zeroize::Zeroize; @@ -34,6 +37,11 @@ pub trait RecoveryStateListener: Sync + Send { fn on_update(&self, status: RecoveryState); } +#[uniffi::export(callback_interface)] +pub trait VerificationStateListener: Sync + Send { + fn on_update(&self, status: VerificationState); +} + #[derive(uniffi::Enum)] pub enum BackupUploadState { Waiting, @@ -186,6 +194,23 @@ impl From for EnableRecoveryProgress { } } +#[derive(uniffi::Enum)] +pub enum VerificationState { + Unknown, + Verified, + Unverified, +} + +impl From for VerificationState { + fn from(value: encryption::VerificationState) -> Self { + match &value { + encryption::VerificationState::Unknown => Self::Unknown, + encryption::VerificationState::Verified => Self::Verified, + encryption::VerificationState::Unverified => Self::Unverified, + } + } +} + #[uniffi::export(async_runtime = "tokio")] impl Encryption { pub fn 
backup_state_listener(&self, listener: Box) -> Arc { @@ -326,4 +351,20 @@ impl Encryption { Ok(result?) } + + pub fn verification_state(&self) -> VerificationState { + self.inner.verification_state().get().into() + } + + pub fn verification_state_listener( + self: Arc, + listener: Box, + ) -> Arc { + let mut subscriber = self.inner.verification_state(); + Arc::new(TaskHandle::new(RUNTIME.spawn(async move { + while let Some(verification_state) = subscriber.next().await { + listener.on_update(verification_state.into()); + } + }))) + } } diff --git a/bindings/matrix-sdk-ffi/src/error.rs b/bindings/matrix-sdk-ffi/src/error.rs index 2f9aab9b56e..27e7d9ed70a 100644 --- a/bindings/matrix-sdk-ffi/src/error.rs +++ b/bindings/matrix-sdk-ffi/src/error.rs @@ -1,13 +1,10 @@ use std::fmt::Display; use matrix_sdk::{ - self, encryption::CryptoStoreError, oidc::OidcError, HttpError, IdParseError, - NotificationSettingsError as SdkNotificationSettingsError, StoreError, -}; -use matrix_sdk_ui::{ - encryption_sync_service, event_cache::EventCacheError, notification_client, sync_service, - timeline, + encryption::CryptoStoreError, event_cache::EventCacheError, oidc::OidcError, HttpError, + IdParseError, NotificationSettingsError as SdkNotificationSettingsError, StoreError, }; +use matrix_sdk_ui::{encryption_sync_service, notification_client, sync_service, timeline}; use uniffi::UnexpectedUniFFICallbackError; #[derive(Debug, thiserror::Error)] diff --git a/bindings/matrix-sdk-ffi/src/notification_settings.rs b/bindings/matrix-sdk-ffi/src/notification_settings.rs index a4cfa77c902..81cbf6b8431 100644 --- a/bindings/matrix-sdk-ffi/src/notification_settings.rs +++ b/bindings/matrix-sdk-ffi/src/notification_settings.rs @@ -319,6 +319,13 @@ impl NotificationSettings { } } + /// Check whether [MSC 4028 push rule][rule] is enabled on the homeserver. + /// + /// [rule]: https://github.com/matrix-org/matrix-spec-proposals/blob/giomfo/push_encrypted_events/proposals/4028-push-all-encrypted-events-except-for-muted-rooms.md + pub async fn can_homeserver_push_encrypted_event_to_device(&self) -> bool { + self.sdk_client.can_homeserver_push_encrypted_event_to_device().await.unwrap() + } + /// Set whether user mentions are enabled. 
pub async fn set_user_mention_enabled( &self, diff --git a/bindings/matrix-sdk-ffi/src/platform.rs b/bindings/matrix-sdk-ffi/src/platform.rs index 4c1be173175..fed242ba5cf 100644 --- a/bindings/matrix-sdk-ffi/src/platform.rs +++ b/bindings/matrix-sdk-ffi/src/platform.rs @@ -6,6 +6,7 @@ use opentelemetry::KeyValue; use opentelemetry_otlp::{Protocol, WithExportConfig}; use opentelemetry_sdk::{runtime::RuntimeChannel, trace::Tracer, Resource}; use tokio::runtime::Handle; +use tracing_appender::rolling::{RollingFileAppender, Rotation}; use tracing_core::Subscriber; use tracing_subscriber::{ fmt::{self, time::FormatTime, FormatEvent, FormatFields, FormattedFields}, @@ -202,13 +203,26 @@ where } let file_layer = config.write_to_files.map(|c| { + let mut builder = RollingFileAppender::builder() + .rotation(Rotation::HOURLY) + .filename_prefix(&c.file_prefix); + + if let Some(max_files) = c.max_files { + builder = builder.max_log_files(max_files as usize) + }; + if let Some(file_suffix) = c.file_suffix { + builder = builder.filename_suffix(file_suffix) + } + + let writer = builder.build(&c.path).expect("Failed to create a rolling file appender."); + fmt::layer() .event_format(EventFormatter::new()) // EventFormatter doesn't support ANSI colors anyways, but the // default field formatter does, which is unhelpful for iOS + // Android logs, but enabled by default. .with_ansi(false) - .with_writer(tracing_appender::rolling::hourly(c.path, c.file_prefix)) + .with_writer(writer) }); Layer::and_then( @@ -237,6 +251,8 @@ where pub struct TracingFileConfiguration { path: String, file_prefix: String, + file_suffix: Option, + max_files: Option, } #[derive(uniffi::Record)] diff --git a/bindings/matrix-sdk-ffi/src/room.rs b/bindings/matrix-sdk-ffi/src/room.rs index 6f3af113fcd..375f0a2e0a8 100644 --- a/bindings/matrix-sdk-ffi/src/room.rs +++ b/bindings/matrix-sdk-ffi/src/room.rs @@ -1,8 +1,8 @@ -use std::{convert::TryFrom, sync::Arc}; +use std::sync::Arc; use anyhow::{Context, Result}; use matrix_sdk::{ - room::{power_levels::RoomPowerLevelChanges, Room as SdkRoom}, + room::{power_levels::RoomPowerLevelChanges, Room as SdkRoom, RoomMemberRole}, RoomMemberships, RoomState, }; use matrix_sdk_ui::timeline::RoomExt; @@ -10,7 +10,13 @@ use mime::Mime; use ruma::{ api::client::room::report_content, assign, - events::room::{avatar::ImageInfo as RumaAvatarImageInfo, MediaSource}, + events::{ + room::{ + avatar::ImageInfo as RumaAvatarImageInfo, + power_levels::RoomPowerLevels as RumaPowerLevels, MediaSource, + }, + TimelineEventType, + }, EventId, Int, UserId, }; use tokio::sync::RwLock; @@ -128,21 +134,28 @@ impl Room { self.inner.active_room_call_participants().iter().map(|u| u.to_string()).collect() } - pub fn inviter(&self) -> Option> { + pub fn inviter(&self) -> Option { if self.inner.state() == RoomState::Invited { RUNTIME.block_on(async move { - self.inner - .invite_details() - .await - .ok() - .and_then(|a| a.inviter) - .map(|m| Arc::new(RoomMember::new(m))) + self.inner.invite_details().await.ok().and_then(|a| a.inviter).map(|m| m.into()) }) } else { None } } + /// Forces the currently active room key, which is used to encrypt messages, + /// to be rotated. + /// + /// A new room key will be crated and shared with all the room members the + /// next time a message will be sent. You don't have to call this method, + /// room keys will be rotated automatically when necessary. This method is + /// still useful for debugging purposes. 
+ pub async fn discard_room_key(&self) -> Result<(), ClientError> { + self.inner.discard_room_key().await?; + Ok(()) + } + pub async fn timeline(&self) -> Result, ClientError> { let mut write_guard = self.timeline.write().await; if let Some(timeline) = &*write_guard { @@ -177,10 +190,10 @@ impl Room { ))) } - pub async fn member(&self, user_id: String) -> Result, ClientError> { + pub async fn member(&self, user_id: String) -> Result { let user_id = UserId::parse(&*user_id).context("Invalid user id.")?; let member = self.inner.get_member(&user_id).await?.context("No user found")?; - Ok(Arc::new(RoomMember::new(member))) + Ok(member.into()) } pub fn member_avatar_url(&self, user_id: String) -> Result, ClientError> { @@ -341,13 +354,11 @@ impl Room { /// /// # Arguments /// - /// * `event_id` - The ID of the user to ignore. - pub fn ignore_user(&self, user_id: String) -> Result<(), ClientError> { - RUNTIME.block_on(async move { - let user_id = UserId::parse(user_id)?; - self.inner.client().account().ignore_user(&user_id).await?; - Ok(()) - }) + /// * `user_id` - The ID of the user to ignore. + pub async fn ignore_user(&self, user_id: String) -> Result<(), ClientError> { + let user_id = UserId::parse(user_id)?; + self.inner.client().account().ignore_user(&user_id).await?; + Ok(()) } /// Leave this room. @@ -524,7 +535,7 @@ impl Room { Ok(self.inner.typing_notice(is_typing).await?) } - pub async fn subscribe_to_typing_notifications( + pub fn subscribe_to_typing_notifications( self: Arc, listener: Box, ) -> Arc { @@ -556,11 +567,9 @@ impl Room { Ok(()) } - pub async fn build_power_level_changes_from_current( - &self, - ) -> Result { + pub async fn get_power_levels(&self) -> Result { let power_levels = self.inner.room_power_levels().await?; - Ok(power_levels.into()) + Ok(RoomPowerLevels::from(power_levels)) } pub async fn apply_power_level_changes( @@ -571,19 +580,85 @@ impl Room { Ok(()) } - pub async fn update_power_level_for_user( + pub async fn update_power_levels_for_users( &self, - user_id: String, - power_level: i64, + updates: Vec, ) -> Result<(), ClientError> { - let user_id = UserId::parse(&user_id)?; - let power_level = Int::new(power_level).context("Invalid power level")?; + let updates = updates + .iter() + .map(|update| { + let user_id: &UserId = update.user_id.as_str().try_into()?; + let power_level = Int::new(update.power_level).context("Invalid power level")?; + Ok((user_id, power_level)) + }) + .collect::>>()?; + self.inner - .update_power_levels(vec![(&user_id, power_level)]) + .update_power_levels(updates) .await .map_err(|e| ClientError::Generic { msg: e.to_string() })?; Ok(()) } + + pub async fn suggested_role_for_user( + &self, + user_id: String, + ) -> Result { + let user_id = UserId::parse(&user_id)?; + Ok(self.inner.get_suggested_user_role(&user_id).await?) + } + + pub async fn reset_power_levels(&self) -> Result { + Ok(RoomPowerLevels::from(self.inner.reset_power_levels().await?)) + } +} + +#[derive(uniffi::Record)] +pub struct RoomPowerLevels { + /// The level required to ban a user. + pub ban: i64, + /// The level required to invite a user. + pub invite: i64, + /// The level required to kick a user. + pub kick: i64, + /// The level required to redact an event. + pub redact: i64, + /// The default level required to send message events. + pub events_default: i64, + /// The default level required to send state events. + pub state_default: i64, + /// The default power level for every user in the room. 
+ pub users_default: i64, + /// The level required to change the room's name. + pub room_name: i64, + /// The level required to change the room's avatar. + pub room_avatar: i64, + /// The level required to change the room's topic. + pub room_topic: i64, +} + +impl From for RoomPowerLevels { + fn from(value: RumaPowerLevels) -> Self { + fn state_event_level_for( + power_levels: &RumaPowerLevels, + event_type: &TimelineEventType, + ) -> i64 { + let default_state: i64 = power_levels.state_default.into(); + power_levels.events.get(event_type).map_or(default_state, |&level| level.into()) + } + Self { + ban: value.ban.into(), + invite: value.invite.into(), + kick: value.kick.into(), + redact: value.redact.into(), + events_default: value.events_default.into(), + state_default: value.state_default.into(), + users_default: value.users_default.into(), + room_name: state_event_level_for(&value, &TimelineEventType::RoomName), + room_avatar: state_event_level_for(&value, &TimelineEventType::RoomAvatar), + room_topic: state_event_level_for(&value, &TimelineEventType::RoomTopic), + } + } } #[uniffi::export(callback_interface)] @@ -613,13 +688,22 @@ impl RoomMembersIterator { self.chunk_iterator.len() } - fn next_chunk(&self, chunk_size: u32) -> Option>> { + fn next_chunk(&self, chunk_size: u32) -> Option> { self.chunk_iterator .next(chunk_size) - .map(|members| members.into_iter().map(RoomMember::new).map(Arc::new).collect()) + .map(|members| members.into_iter().map(|m| m.into()).collect()) } } +/// An update for a particular user's power level within the room. +#[derive(uniffi::Record)] +pub struct UserPowerLevelUpdate { + /// The user ID of the user to update. + user_id: String, + /// The power level to assign to the user. + power_level: i64, +} + impl TryFrom for RumaAvatarImageInfo { type Error = MediaInfoError; diff --git a/bindings/matrix-sdk-ffi/src/room_info.rs b/bindings/matrix-sdk-ffi/src/room_info.rs index 676f3d58773..5d3f31e5c4e 100644 --- a/bindings/matrix-sdk-ffi/src/room_info.rs +++ b/bindings/matrix-sdk-ffi/src/room_info.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use matrix_sdk::RoomState; use ruma::OwnedMxcUri; @@ -23,10 +23,11 @@ pub struct RoomInfo { alternative_aliases: Vec, membership: Membership, latest_event: Option>, - inviter: Option>, + inviter: Option, active_members_count: u64, invited_members_count: u64, joined_members_count: u64, + user_power_levels: HashMap, highlight_count: u64, notification_count: u64, user_defined_notification_mode: Option, @@ -53,6 +54,12 @@ impl RoomInfo { ) -> matrix_sdk::Result { let unread_notification_counts = room.unread_notification_counts(); + let power_levels_map = room.users_with_power_levels().await; + let mut user_power_levels = HashMap::::new(); + for (id, level) in power_levels_map.iter() { + user_power_levels.insert(id.to_string(), *level); + } + Ok(Self { id: room.room_id().to_string(), name: room.name(), @@ -68,14 +75,13 @@ impl RoomInfo { membership: room.state().into(), latest_event, inviter: match room.state() { - RoomState::Invited => { - room.invite_details().await?.inviter.map(|inner| Arc::new(RoomMember { inner })) - } + RoomState::Invited => room.invite_details().await?.inviter.map(|m| m.into()), _ => None, }, active_members_count: room.active_members_count(), invited_members_count: room.invited_members_count(), joined_members_count: room.joined_members_count(), + user_power_levels, highlight_count: unread_notification_counts.highlight_count, notification_count: 
unread_notification_counts.notification_count, user_defined_notification_mode: room diff --git a/bindings/matrix-sdk-ffi/src/room_list.rs b/bindings/matrix-sdk-ffi/src/room_list.rs index ea9ca3b53a6..362bb55267c 100644 --- a/bindings/matrix-sdk-ffi/src/room_list.rs +++ b/bindings/matrix-sdk-ffi/src/room_list.rs @@ -15,13 +15,15 @@ use matrix_sdk::{ use matrix_sdk_ui::{ room_list_service::{ filters::{ - new_filter_all, new_filter_any, new_filter_category, new_filter_fuzzy_match_room_name, - new_filter_non_left, new_filter_none, new_filter_normalized_match_room_name, - new_filter_unread, RoomCategory, + new_filter_all, new_filter_any, new_filter_category, new_filter_favourite, + new_filter_fuzzy_match_room_name, new_filter_invite, new_filter_non_left, + new_filter_none, new_filter_normalized_match_room_name, new_filter_unread, + RoomCategory, }, BoxedFilterFn, }, timeline::default_event_filter, + unable_to_decrypt_hook::UtdHookManager, }; use tokio::sync::RwLock; @@ -52,6 +54,8 @@ pub enum RoomListError { TimelineNotInitialized { room_name: String }, #[error("Timeline couldn't be initialized: {error}")] InitializingTimeline { error: String }, + #[error("Event cache ran into an error: {error}")] + EventCache { error: String }, } impl From for RoomListError { @@ -69,6 +73,7 @@ impl From for RoomListError { InitializingTimeline(source) => { Self::InitializingTimeline { error: source.to_string() } } + EventCache(error) => Self::EventCache { error: error.to_string() }, } } } @@ -103,6 +108,7 @@ impl From for matrix_sdk_ui::room_list_service::Input { #[derive(uniffi::Object)] pub struct RoomListService { pub(crate) inner: Arc, + pub(crate) utd_hook: Option>, } #[uniffi::export(async_runtime = "tokio")] @@ -124,6 +130,7 @@ impl RoomListService { Ok(Arc::new(RoomListItem { inner: Arc::new(RUNTIME.block_on(async { self.inner.room(room_id).await })?), + utd_hook: self.utd_hook.clone(), })) } @@ -417,6 +424,8 @@ pub enum RoomListEntriesDynamicFilterKind { Any { filters: Vec }, NonLeft, Unread, + Favourite, + Invite, Category { expect: RoomListFilterCategory }, None, NormalizedMatchRoomName { pattern: String }, @@ -455,6 +464,8 @@ impl FilterWrapper { ))), Kind::NonLeft => Self(Box::new(new_filter_non_left(client))), Kind::Unread => Self(Box::new(new_filter_unread(client))), + Kind::Favourite => Self(Box::new(new_filter_favourite(client))), + Kind::Invite => Self(Box::new(new_filter_invite(client))), Kind::Category { expect } => Self(Box::new(new_filter_category(client, expect.into()))), Kind::None => Self(Box::new(new_filter_none())), Kind::NormalizedMatchRoomName { pattern } => { @@ -470,6 +481,7 @@ impl FilterWrapper { #[derive(uniffi::Object)] pub struct RoomListItem { inner: Arc, + utd_hook: Option>, } #[uniffi::export(async_runtime = "tokio")] @@ -529,13 +541,23 @@ impl RoomListItem { &self, event_type_filter: Option>, ) -> Result<(), RoomListError> { - let mut timeline_builder = self.inner.default_room_timeline_builder().await; + let mut timeline_builder = self + .inner + .default_room_timeline_builder() + .await + .map_err(|err| RoomListError::InitializingTimeline { error: err.to_string() })?; + if let Some(event_type_filter) = event_type_filter { timeline_builder = timeline_builder.event_filter(move |event, room_version_id| { // Always perform the default filter first default_event_filter(event, room_version_id) && event_type_filter.filter(event) }); } + + if let Some(utd_hook) = self.utd_hook.clone() { + timeline_builder = timeline_builder.with_unable_to_decrypt_hook(utd_hook); + } + 
self.inner.init_timeline_with_builder(timeline_builder).map_err(RoomListError::from).await } diff --git a/bindings/matrix-sdk-ffi/src/room_member.rs b/bindings/matrix-sdk-ffi/src/room_member.rs index 3c3c6a941d1..0d6117ed344 100644 --- a/bindings/matrix-sdk-ffi/src/room_member.rs +++ b/bindings/matrix-sdk-ffi/src/room_member.rs @@ -1,11 +1,5 @@ use matrix_sdk::room::{RoomMember as SdkRoomMember, RoomMemberRole}; -use super::RUNTIME; -use crate::{ - event::{MessageLikeEventType, StateEventType}, - ClientError, -}; - #[derive(Clone, uniffi::Enum)] pub enum MembershipState { /// The user is banned. @@ -45,112 +39,43 @@ impl From for Membershi } } -#[derive(uniffi::Object)] -pub struct RoomMember { - pub(crate) inner: SdkRoomMember, +#[uniffi::export] +pub fn suggested_role_for_power_level(power_level: i64) -> RoomMemberRole { + // It's not possible to expose the constructor on the Enum through Uniffi ☹️ + RoomMemberRole::suggested_role_for_power_level(power_level) } #[uniffi::export] -impl RoomMember { - pub fn user_id(&self) -> String { - self.inner.user_id().to_string() - } - - pub fn display_name(&self) -> Option { - self.inner.display_name().map(|d| d.to_owned()) - } - - pub fn avatar_url(&self) -> Option { - self.inner.avatar_url().map(ToString::to_string) - } - - pub fn membership(&self) -> MembershipState { - self.inner.membership().to_owned().into() - } - - pub fn is_name_ambiguous(&self) -> bool { - self.inner.name_ambiguous() - } - - pub fn power_level(&self) -> i64 { - self.inner.power_level() - } - - pub fn suggested_role_for_power_level(&self) -> RoomMemberRole { - self.inner.suggested_role_for_power_level() - } - - pub fn normalized_power_level(&self) -> i64 { - self.inner.normalized_power_level() - } - - pub fn is_ignored(&self) -> bool { - self.inner.is_ignored() - } - - pub fn is_account_user(&self) -> bool { - self.inner.is_account_user() - } - - /// Adds the room member to the current account data's ignore list - /// which will ignore the user across all rooms. - pub fn ignore(&self) -> Result<(), ClientError> { - RUNTIME.block_on(async move { - self.inner.ignore().await?; - Ok(()) - }) - } - - /// Removes the room member from the current account data's ignore list - /// which will unignore the user across all rooms. 
- pub fn unignore(&self) -> Result<(), ClientError> { - RUNTIME.block_on(async move { - self.inner.unignore().await?; - Ok(()) - }) - } - - pub fn can_ban(&self) -> bool { - self.inner.can_ban() - } - - pub fn can_invite(&self) -> bool { - self.inner.can_invite() - } - - pub fn can_kick(&self) -> bool { - self.inner.can_kick() - } - - pub fn can_redact_own(&self) -> bool { - self.inner.can_redact_own() - } - - pub fn can_redact_other(&self) -> bool { - self.inner.can_redact_other() - } - - pub fn can_send_state(&self, state_event: StateEventType) -> bool { - self.inner.can_send_state(state_event.into()) - } - - pub fn can_send_message(&self, event: MessageLikeEventType) -> bool { - self.inner.can_send_message(event.into()) - } - - pub fn can_trigger_room_notification(&self) -> bool { - self.inner.can_trigger_room_notification() - } +pub fn suggested_power_level_for_role(role: RoomMemberRole) -> i64 { + // It's not possible to expose methods on an Enum through Uniffi ☹️ + role.suggested_power_level() } -impl RoomMember { - pub fn new(room_member: SdkRoomMember) -> Self { - RoomMember { inner: room_member } - } +#[derive(uniffi::Record)] +pub struct RoomMember { + pub user_id: String, + pub display_name: Option, + pub avatar_url: Option, + pub membership: MembershipState, + pub is_name_ambiguous: bool, + pub power_level: i64, + pub normalized_power_level: i64, + pub is_ignored: bool, + pub suggested_role_for_power_level: RoomMemberRole, } -#[uniffi::export] -pub fn suggested_role_for_power_level(power_level: i64) -> RoomMemberRole { - // It's not possible to expose the constructor on the Enum through Uniffi ☹️ - RoomMemberRole::suggested_role_for_power_level(power_level) +impl From for RoomMember { + fn from(m: SdkRoomMember) -> Self { + RoomMember { + user_id: m.user_id().to_string(), + display_name: m.display_name().map(|s| s.to_owned()), + avatar_url: m.avatar_url().map(|a| a.to_string()), + membership: m.membership().clone().into(), + is_name_ambiguous: m.name_ambiguous(), + power_level: m.power_level(), + normalized_power_level: m.normalized_power_level(), + is_ignored: m.is_ignored(), + suggested_role_for_power_level: m.suggested_role_for_power_level(), + } + } } diff --git a/bindings/matrix-sdk-ffi/src/ruma.rs b/bindings/matrix-sdk-ffi/src/ruma.rs index cc0e317e860..fc85768e3f9 100644 --- a/bindings/matrix-sdk-ffi/src/ruma.rs +++ b/bindings/matrix-sdk-ffi/src/ruma.rs @@ -174,18 +174,25 @@ impl TryFrom for RumaMessageType { } MessageType::Image { content } => Self::Image( RumaImageMessageEventContent::new(content.body, (*content.source).clone()) + .formatted(content.formatted.map(Into::into)) + .filename(content.filename) .info(content.info.map(Into::into).map(Box::new)), ), MessageType::Audio { content } => Self::Audio( RumaAudioMessageEventContent::new(content.body, (*content.source).clone()) + .formatted(content.formatted.map(Into::into)) + .filename(content.filename) .info(content.info.map(Into::into).map(Box::new)), ), MessageType::Video { content } => Self::Video( RumaVideoMessageEventContent::new(content.body, (*content.source).clone()) + .formatted(content.formatted.map(Into::into)) + .filename(content.filename) .info(content.info.map(Into::into).map(Box::new)), ), MessageType::File { content } => Self::File( RumaFileMessageEventContent::new(content.body, (*content.source).clone()) + .formatted(content.formatted.map(Into::into)) .filename(content.filename) .info(content.info.map(Into::into).map(Box::new)), ), @@ -221,6 +228,8 @@ impl From for MessageType { 
RumaMessageType::Image(c) => MessageType::Image { content: ImageMessageContent { body: c.body.clone(), + formatted: c.formatted.as_ref().map(Into::into), + filename: c.filename.clone(), source: Arc::new(c.source.clone()), info: c.info.as_deref().map(Into::into), }, @@ -228,6 +237,8 @@ impl From for MessageType { RumaMessageType::Audio(c) => MessageType::Audio { content: AudioMessageContent { body: c.body.clone(), + formatted: c.formatted.as_ref().map(Into::into), + filename: c.filename.clone(), source: Arc::new(c.source.clone()), info: c.info.as_deref().map(Into::into), audio: c.audio.map(Into::into), @@ -237,6 +248,8 @@ impl From for MessageType { RumaMessageType::Video(c) => MessageType::Video { content: VideoMessageContent { body: c.body.clone(), + formatted: c.formatted.as_ref().map(Into::into), + filename: c.filename.clone(), source: Arc::new(c.source.clone()), info: c.info.as_deref().map(Into::into), }, @@ -244,6 +257,7 @@ impl From for MessageType { RumaMessageType::File(c) => MessageType::File { content: FileMessageContent { body: c.body.clone(), + formatted: c.formatted.as_ref().map(Into::into), filename: c.filename.clone(), source: Arc::new(c.source.clone()), info: c.info.as_deref().map(Into::into), @@ -295,6 +309,8 @@ pub struct EmoteMessageContent { #[derive(Clone, uniffi::Record)] pub struct ImageMessageContent { pub body: String, + pub formatted: Option, + pub filename: Option, pub source: Arc, pub info: Option, } @@ -302,6 +318,8 @@ pub struct ImageMessageContent { #[derive(Clone, uniffi::Record)] pub struct AudioMessageContent { pub body: String, + pub formatted: Option, + pub filename: Option, pub source: Arc, pub info: Option, pub audio: Option, @@ -311,6 +329,8 @@ pub struct AudioMessageContent { #[derive(Clone, uniffi::Record)] pub struct VideoMessageContent { pub body: String, + pub formatted: Option, + pub filename: Option, pub source: Arc, pub info: Option, } @@ -318,6 +338,7 @@ pub struct VideoMessageContent { #[derive(Clone, uniffi::Record)] pub struct FileMessageContent { pub body: String, + pub formatted: Option, pub filename: Option, pub source: Arc, pub info: Option, diff --git a/bindings/matrix-sdk-ffi/src/sync_service.rs b/bindings/matrix-sdk-ffi/src/sync_service.rs index b1441ea6cd2..4b0c05489a5 100644 --- a/bindings/matrix-sdk-ffi/src/sync_service.rs +++ b/bindings/matrix-sdk-ffi/src/sync_service.rs @@ -12,13 +12,18 @@ // See the License for that specific language governing permissions and // limitations under the License. 
-use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, sync::Arc, time::Duration}; use futures_util::pin_mut; use matrix_sdk::Client; -use matrix_sdk_ui::sync_service::{ - State as MatrixSyncServiceState, SyncService as MatrixSyncService, - SyncServiceBuilder as MatrixSyncServiceBuilder, +use matrix_sdk_ui::{ + sync_service::{ + State as MatrixSyncServiceState, SyncService as MatrixSyncService, + SyncServiceBuilder as MatrixSyncServiceBuilder, + }, + unable_to_decrypt_hook::{ + UnableToDecryptHook, UnableToDecryptInfo as SdkUnableToDecryptInfo, UtdHookManager, + }, }; use crate::{ @@ -53,12 +58,16 @@ pub trait SyncServiceStateObserver: Send + Sync + Debug { #[derive(uniffi::Object)] pub struct SyncService { pub(crate) inner: Arc, + utd_hook: Option>, } #[uniffi::export(async_runtime = "tokio")] impl SyncService { pub fn room_list_service(&self) -> Arc { - Arc::new(RoomListService { inner: self.inner.room_list_service() }) + Arc::new(RoomListService { + inner: self.inner.room_list_service(), + utd_hook: self.utd_hook.clone(), + }) } pub async fn start(&self) { @@ -85,24 +94,106 @@ impl SyncService { #[derive(Clone, uniffi::Object)] pub struct SyncServiceBuilder { builder: MatrixSyncServiceBuilder, + + utd_hook: Option>, } impl SyncServiceBuilder { pub(crate) fn new(client: Client) -> Arc { - Arc::new(Self { builder: MatrixSyncService::builder(client) }) + Arc::new(Self { builder: MatrixSyncService::builder(client), utd_hook: None }) } } #[uniffi::export(async_runtime = "tokio")] impl SyncServiceBuilder { + pub fn with_unified_invites_in_room_list( + self: Arc, + with_unified_invites: bool, + ) -> Arc { + let this = unwrap_or_clone_arc(self); + let builder = this.builder.with_unified_invites_in_room_list(with_unified_invites); + Arc::new(Self { builder, utd_hook: this.utd_hook }) + } + pub fn with_cross_process_lock(self: Arc, app_identifier: Option) -> Arc { let this = unwrap_or_clone_arc(self); let builder = this.builder.with_cross_process_lock(app_identifier); - Arc::new(Self { builder }) + Arc::new(Self { builder, utd_hook: this.utd_hook }) + } + + pub fn with_utd_hook(self: Arc, delegate: Box) -> Arc { + // UTDs detected before this duration may be reclassified as "late decryption" + // events (or discarded, if they get decrypted fast enough). + const UTD_HOOK_GRACE_PERIOD: Duration = Duration::from_secs(60); + + let this = unwrap_or_clone_arc(self); + let utd_hook = Some(Arc::new( + UtdHookManager::new(Arc::new(UtdHook { delegate })) + .with_max_delay(UTD_HOOK_GRACE_PERIOD), + )); + Arc::new(Self { builder: this.builder, utd_hook }) } pub async fn finish(self: Arc) -> Result, ClientError> { let this = unwrap_or_clone_arc(self); - Ok(Arc::new(SyncService { inner: Arc::new(this.builder.build().await?) })) + Ok(Arc::new(SyncService { + inner: Arc::new(this.builder.build().await?), + utd_hook: this.utd_hook, + })) + } +} + +#[uniffi::export(callback_interface)] +pub trait UnableToDecryptDelegate: Sync + Send { + fn on_utd(&self, info: UnableToDecryptInfo); +} + +struct UtdHook { + delegate: Box, +} + +impl std::fmt::Debug for UtdHook { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("UtdHook").finish_non_exhaustive() + } +} + +impl UnableToDecryptHook for UtdHook { + fn on_utd(&self, info: SdkUnableToDecryptInfo) { + const IGNORE_UTD_PERIOD: Duration = Duration::from_secs(4); + + // UTDs that have been decrypted in the `IGNORE_UTD_PERIOD` are just ignored and + // not considered UTDs. 
+ if let Some(duration) = &info.time_to_decrypt { + if *duration < IGNORE_UTD_PERIOD { + return; + } + } + + // Report the UTD to the client. + self.delegate.on_utd(info.into()); + } +} + +#[derive(uniffi::Record)] +pub struct UnableToDecryptInfo { + /// The identifier of the event that couldn't get decrypted. + event_id: String, + + /// If the event could be decrypted late (that is, the event was encrypted + /// at first, but could be decrypted later on), then this indicates the + /// time it took to decrypt the event. If it is not set, this is + /// considered a definite UTD. + /// + /// If set, this is in milliseconds. + pub time_to_decrypt_ms: Option, +} + +impl From for UnableToDecryptInfo { + fn from(value: SdkUnableToDecryptInfo) -> Self { + Self { + event_id: value.event_id.to_string(), + time_to_decrypt_ms: value.time_to_decrypt.map(|ttd| ttd.as_millis() as u64), + } } } diff --git a/bindings/matrix-sdk-ffi/src/timeline/content.rs b/bindings/matrix-sdk-ffi/src/timeline/content.rs index 3b7f6fcf928..de9302b7e07 100644 --- a/bindings/matrix-sdk-ffi/src/timeline/content.rs +++ b/bindings/matrix-sdk-ffi/src/timeline/content.rs @@ -41,6 +41,7 @@ impl TimelineItemContent { } } Content::Poll(poll_state) => TimelineItemContentKind::from(poll_state.results()), + Content::CallInvite => TimelineItemContentKind::CallInvite, Content::UnableToDecrypt(msg) => { TimelineItemContentKind::UnableToDecrypt { msg: EncryptedMessage::new(msg) } } @@ -113,6 +114,7 @@ pub enum TimelineItemContentKind { end_time: Option, has_been_edited: bool, }, + CallInvite, UnableToDecrypt { msg: EncryptedMessage, }, @@ -308,7 +310,7 @@ pub enum OtherState { RoomJoinRules, RoomName { name: Option }, RoomPinnedEvents, - RoomPowerLevels { users: HashMap }, + RoomPowerLevels { users: HashMap, previous: Option> }, RoomServerAcl, RoomThirdPartyInvite { display_name: Option }, RoomTombstone, @@ -351,18 +353,20 @@ impl From<&matrix_sdk_ui::timeline::AnyOtherFullStateEventContent> for OtherStat Self::RoomName { name } } Content::RoomPinnedEvents(_) => Self::RoomPinnedEvents, - Content::RoomPowerLevels(c) => { - let changes = match c { - FullContent::Original { content, prev_content } => { - power_level_user_changes(content, prev_content) - .iter() - .map(|(k, v)| (k.to_string(), *v)) - .collect() - } - FullContent::Redacted(_) => Default::default(), - }; - Self::RoomPowerLevels { users: changes } - } + Content::RoomPowerLevels(c) => match c { + FullContent::Original { content, prev_content } => Self::RoomPowerLevels { + users: power_level_user_changes(content, prev_content) + .iter() + .map(|(k, v)| (k.to_string(), *v)) + .collect(), + previous: prev_content.as_ref().map(|prev_content| { + prev_content.users.iter().map(|(k, &v)| (k.to_string(), v.into())).collect() + }), + }, + FullContent::Redacted(_) => { + Self::RoomPowerLevels { users: Default::default(), previous: None } + } + }, Content::RoomServerAcl(_) => Self::RoomServerAcl, Content::RoomThirdPartyInvite(c) => { let display_name = match c { diff --git a/crates/matrix-sdk-base/src/client.rs b/crates/matrix-sdk-base/src/client.rs index 743a48d8092..cb8463ad555 100644 --- a/crates/matrix-sdk-base/src/client.rs +++ b/crates/matrix-sdk-base/src/client.rs @@ -35,6 +35,7 @@ use ruma::events::{ use ruma::{ api::client as api, events::{ + ignored_user_list::IgnoredUserListEvent, push_rules::{PushRulesEvent, PushRulesEventContent}, room::{ member::{MembershipState, SyncRoomMemberEvent}, @@ -44,7 +45,7 @@ use ruma::{ }, AnyGlobalAccountDataEvent, AnyRoomAccountDataEvent, 
AnyStrippedStateEvent, AnySyncEphemeralRoomEvent, AnySyncMessageLikeEvent, AnySyncStateEvent, - AnySyncTimelineEvent, GlobalAccountDataEventType, StateEventType, + AnySyncTimelineEvent, GlobalAccountDataEventType, StateEvent, StateEventType, }, push::{Action, PushConditionRoomCtx, Ruleset}, serde::Raw, @@ -57,11 +58,11 @@ use tracing::{debug, info, instrument, trace, warn}; #[cfg(all(feature = "e2e-encryption", feature = "experimental-sliding-sync"))] use crate::latest_event::{is_suitable_for_latest_event, LatestEvent, PossibleLatestEvent}; +#[cfg(feature = "e2e-encryption")] +use crate::RoomMemberships; use crate::{ - deserialized_responses::{ - AmbiguityChanges, MembersResponse, RawAnySyncOrStrippedTimelineEvent, SyncTimelineEvent, - }, - error::Result, + deserialized_responses::{RawAnySyncOrStrippedTimelineEvent, SyncTimelineEvent}, + error::{Error, Result}, rooms::{normal::RoomInfoUpdate, Room, RoomInfo, RoomState}, store::{ ambiguity_map::AmbiguityCache, DynStateStore, MemoryStore, Result as StoreResult, @@ -70,8 +71,6 @@ use crate::{ sync::{JoinedRoomUpdate, LeftRoomUpdate, Notification, RoomUpdates, SyncResponse, Timeline}, RoomStateFilter, SessionMeta, }; -#[cfg(feature = "e2e-encryption")] -use crate::{error::Error, RoomMemberships}; /// A no IO Client implementation. /// @@ -93,7 +92,7 @@ pub struct BaseClient { #[cfg(feature = "e2e-encryption")] olm_machine: Arc>>, /// Observable of when a user is ignored/unignored. - pub(crate) ignore_user_list_changes: SharedObservable<()>, + pub(crate) ignore_user_list_changes: SharedObservable>, /// A sender that is used to communicate changes to room information. Each /// event contains the room and a boolean whether this event should @@ -719,11 +718,14 @@ impl BaseClient { // We found an event we can decrypt if let Ok(any_sync_event) = decrypted.event.deserialize() { // We can deserialize it to find its type - if let PossibleLatestEvent::YesRoomMessage(_) = - is_suitable_for_latest_event(&any_sync_event) - { - // The event is the right type for us to use as latest_event - return Some((Box::new(LatestEvent::new(decrypted)), i)); + match is_suitable_for_latest_event(&any_sync_event) { + PossibleLatestEvent::YesRoomMessage(_) + | PossibleLatestEvent::YesPoll(_) + | PossibleLatestEvent::YesCallInvite(_) => { + // The event is the right type for us to use as latest_event + return Some((Box::new(LatestEvent::new(decrypted)), i)); + } + _ => (), } } } @@ -1070,7 +1072,21 @@ impl BaseClient { pub(crate) fn apply_changes(&self, changes: &StateChanges, trigger_room_list_update: bool) { if changes.account_data.contains_key(&GlobalAccountDataEventType::IgnoredUserList) { - self.ignore_user_list_changes.set(()); + if let Some(event) = + changes.account_data.get(&GlobalAccountDataEventType::IgnoredUserList) + { + match event.deserialize_as::() { + Ok(event) => { + let user_ids: Vec = + event.content.ignored_users.keys().map(|id| id.to_string()).collect(); + + self.ignore_user_list_changes.set(user_ids); + } + Err(error) => { + warn!("Failed to deserialize ignored user list event: {error}") + } + } + } } for (room_id, room_info) in &changes.room_infos { @@ -1083,6 +1099,8 @@ impl BaseClient { /// Receive a get member events response and convert it to a deserialized /// `MembersResponse` /// + /// This client-server request must be made without filters to make sure all + /// members are received. Otherwise, an error is returned. 
/// /// # Arguments /// @@ -1090,92 +1108,105 @@ impl BaseClient { /// /// * `response` - The raw response that was received from the server. #[instrument(skip_all, fields(?room_id))] - pub async fn receive_members( + pub async fn receive_all_members( &self, room_id: &RoomId, + request: &api::membership::get_member_events::v3::Request, response: &api::membership::get_member_events::v3::Response, - ) -> Result { + ) -> Result<()> { + if request.membership.is_some() || request.not_membership.is_some() || request.at.is_some() + { + // This function assumes all members are loaded at once to optimise how display + // name disambiguation works. Using it with partial member list results + // would produce incorrect disambiguated display name entries + return Err(Error::InvalidReceiveMembersParameters); + } + let mut chunk = Vec::with_capacity(response.chunk.len()); - let mut ambiguity_cache = AmbiguityCache::new(self.store.inner.clone()); - if let Some(room) = self.store.get_room(room_id) { - let mut changes = StateChanges::default(); + let Some(room) = self.store.get_room(room_id) else { + // The room is unknown to us: leave early. + return Ok(()); + }; - #[cfg(feature = "e2e-encryption")] - let mut user_ids = BTreeSet::new(); + let mut changes = StateChanges::default(); - for raw_event in &response.chunk { - let member = match raw_event.deserialize() { - Ok(ev) => ev, - Err(e) => { - let event_id: Option = - raw_event.get_field("event_id").ok().flatten(); - debug!(event_id, "Failed to deserialize member event: {e}"); - continue; - } - }; + #[cfg(feature = "e2e-encryption")] + let mut user_ids = BTreeSet::new(); - // TODO: All the actions in this loop used to be done only when the membership - // event was not in the store before. This was changed with the new room API, - // because e.g. leaving a room makes members events outdated and they need to be - // fetched by `members`. Therefore, they need to be overwritten here, even - // if they exist. - // However, this makes a new problem occur where setting the member events here - // potentially races with the sync. - // See . + let mut ambiguity_map: BTreeMap> = BTreeMap::new(); - #[cfg(feature = "e2e-encryption")] - match member.membership() { - MembershipState::Join | MembershipState::Invite => { - user_ids.insert(member.state_key().to_owned()); - } - _ => (), + for raw_event in &response.chunk { + let member = match raw_event.deserialize() { + Ok(ev) => ev, + Err(e) => { + let event_id: Option = raw_event.get_field("event_id").ok().flatten(); + debug!(event_id, "Failed to deserialize member event: {e}"); + continue; } + }; - let sync_member: SyncRoomMemberEvent = member.clone().into(); + // TODO: All the actions in this loop used to be done only when the membership + // event was not in the store before. This was changed with the new room API, + // because e.g. leaving a room makes members events outdated and they need to be + // fetched by `members`. Therefore, they need to be overwritten here, even + // if they exist. + // However, this makes a new problem occur where setting the member events here + // potentially races with the sync. + // See . 
- ambiguity_cache.handle_event(&changes, room_id, &sync_member).await?; + #[cfg(feature = "e2e-encryption")] + match member.membership() { + MembershipState::Join | MembershipState::Invite => { + user_ids.insert(member.state_key().to_owned()); + } + _ => (), + } - if member.state_key() == member.sender() { - changes - .profiles - .entry(room_id.to_owned()) - .or_default() - .insert(member.sender().to_owned(), sync_member.into()); + if let StateEvent::Original(e) = &member { + if let Some(d) = &e.content.displayname { + ambiguity_map.entry(d.clone()).or_default().insert(member.state_key().clone()); } + } + let sync_member: SyncRoomMemberEvent = member.clone().into(); + + if member.state_key() == member.sender() { changes - .state + .profiles .entry(room_id.to_owned()) .or_default() - .entry(member.event_type()) - .or_default() - .insert(member.state_key().to_string(), raw_event.clone().cast()); - chunk.push(member); + .insert(member.sender().to_owned(), sync_member.into()); } - #[cfg(feature = "e2e-encryption")] - if room.is_encrypted() { - if let Some(o) = self.olm_machine().await.as_ref() { - o.update_tracked_users(user_ids.iter().map(Deref::deref)).await? - } + changes + .state + .entry(room_id.to_owned()) + .or_default() + .entry(member.event_type()) + .or_default() + .insert(member.state_key().to_string(), raw_event.clone().cast()); + chunk.push(member); + } + + #[cfg(feature = "e2e-encryption")] + if room.is_encrypted() { + if let Some(o) = self.olm_machine().await.as_ref() { + o.update_tracked_users(user_ids.iter().map(Deref::deref)).await? } + } - changes.ambiguity_maps = ambiguity_cache.cache; + changes.ambiguity_maps.insert(room_id.to_owned(), ambiguity_map); - let _sync_lock = self.sync_lock().lock().await; - let mut room_info = room.clone_info(); - room_info.mark_members_synced(); - changes.add_room(room_info); + let _sync_lock = self.sync_lock().lock().await; + let mut room_info = room.clone_info(); + room_info.mark_members_synced(); + changes.add_room(room_info); - self.store.save_changes(&changes).await?; - self.apply_changes(&changes, false); - } + self.store.save_changes(&changes).await?; + self.apply_changes(&changes, false); - Ok(MembersResponse { - chunk, - ambiguity_changes: AmbiguityChanges { changes: ambiguity_cache.changes }, - }) + Ok(()) } /// Receive a successful filter upload response, the filter id will be @@ -1409,7 +1440,7 @@ impl BaseClient { /// Returns a subscriber that publishes an event every time the ignore user /// list changes - pub fn subscribe_to_ignore_user_list_changes(&self) -> Subscriber<()> { + pub fn subscribe_to_ignore_user_list_changes(&self) -> Subscriber> { self.ignore_user_list_changes.subscribe() } @@ -1457,14 +1488,17 @@ mod tests { use serde_json::json; use super::BaseClient; - use crate::{store::StateStoreExt, DisplayName, RoomState, SessionMeta}; + use crate::{ + store::StateStoreExt, test_utils::logged_in_base_client, DisplayName, RoomState, + SessionMeta, + }; #[async_test] async fn test_invite_after_leaving() { let user_id = user_id!("@alice:example.org"); let room_id = room_id!("!test:example.org"); - let client = logged_in_client(user_id).await; + let client = logged_in_base_client(Some(user_id)).await; let mut ev_builder = SyncResponseBuilder::new(); @@ -1508,7 +1542,7 @@ mod tests { let user_id = user_id!("@alice:example.org"); let room_id = room_id!("!ithpyNKDtmhneaTQja:example.org"); - let client = logged_in_client(user_id).await; + let client = logged_in_base_client(Some(user_id)).await; let response = 
api::sync::sync_events::v3::Response::try_from_http_response(response_from_file(&json!({ "next_batch": "asdkl;fjasdkl;fj;asdkl;f", @@ -1593,11 +1627,11 @@ mod tests { #[cfg(all(feature = "e2e-encryption", feature = "experimental-sliding-sync"))] #[async_test] - async fn when_there_are_no_latest_encrypted_events_decrypting_them_does_nothing() { + async fn test_when_there_are_no_latest_encrypted_events_decrypting_them_does_nothing() { // Given a room let user_id = user_id!("@u:u.to"); let room_id = room_id!("!r:u.to"); - let client = logged_in_client(user_id).await; + let client = logged_in_base_client(Some(user_id)).await; let room = process_room_join_test_helper(&client, room_id, "$1", user_id).await; // Sanity: it has no latest_encrypted_events or latest_event @@ -1619,18 +1653,6 @@ mod tests { // events. In the meantime, there are tests for the most difficult logic // inside Room. --andyb - async fn logged_in_client(user_id: &UserId) -> BaseClient { - let client = BaseClient::new(); - client - .set_session_meta(SessionMeta { - user_id: user_id.to_owned(), - device_id: "FOOBAR".into(), - }) - .await - .expect("set_session_meta failed!"); - client - } - #[cfg(feature = "e2e-encryption")] async fn process_room_join_test_helper( client: &BaseClient, diff --git a/crates/matrix-sdk-base/src/error.rs b/crates/matrix-sdk-base/src/error.rs index 68e8bf8903e..bd7a8f1f1a1 100644 --- a/crates/matrix-sdk-base/src/error.rs +++ b/crates/matrix-sdk-base/src/error.rs @@ -56,4 +56,9 @@ pub enum Error { #[cfg(feature = "e2e-encryption")] #[error(transparent)] MegolmError(#[from] MegolmError), + + /// An error caused by calling the `BaseClient::receive_all_members` + /// function with invalid parameters + #[error("receive_all_members function was called with invalid parameters")] + InvalidReceiveMembersParameters, } diff --git a/crates/matrix-sdk-base/src/latest_event.rs b/crates/matrix-sdk-base/src/latest_event.rs index 9ed4a63c454..8d1e41c4301 100644 --- a/crates/matrix-sdk-base/src/latest_event.rs +++ b/crates/matrix-sdk-base/src/latest_event.rs @@ -9,7 +9,10 @@ use ruma::events::{ poll::unstable_start::SyncUnstablePollStartEvent, room::message::SyncRoomMessageEvent, AnySyncMessageLikeEvent, AnySyncTimelineEvent, }; -use ruma::{MxcUri, OwnedEventId}; +use ruma::{ + events::{call::invite::SyncCallInviteEvent, relation::RelationType}, + MxcUri, OwnedEventId, +}; use serde::{Deserialize, Serialize}; use crate::MinimalRoomMemberEvent; @@ -25,11 +28,15 @@ pub enum PossibleLatestEvent<'a> { YesRoomMessage(&'a SyncRoomMessageEvent), /// This message is suitable - it is a poll YesPoll(&'a SyncUnstablePollStartEvent), + + /// This message is suitable - it is a call invite + YesCallInvite(&'a SyncCallInviteEvent), + // Later: YesState(), // Later: YesReaction(), /// Not suitable - it's a state event NoUnsupportedEventType, - /// Not suitable - it's not an m.room.message + /// Not suitable - it's not a m.room.message or an edit/replacement NoUnsupportedMessageLikeType, /// Not suitable - it's encrypted NoEncrypted, @@ -40,15 +47,37 @@ pub enum PossibleLatestEvent<'a> { #[cfg(feature = "e2e-encryption")] pub fn is_suitable_for_latest_event(event: &AnySyncTimelineEvent) -> PossibleLatestEvent<'_> { match event { - // Suitable - we have an m.room.message that was not redacted + // Suitable - we have an m.room.message that was not redacted or edited AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::RoomMessage(message)) => { - PossibleLatestEvent::YesRoomMessage(message) + // Check if this is a replacement for 
another message. If it is, ignore it + if let Some(original_message) = message.as_original() { + let is_replacement = + original_message.content.relates_to.as_ref().map_or(false, |relates_to| { + if let Some(relation_type) = relates_to.rel_type() { + relation_type == RelationType::Replacement + } else { + false + } + }); + + if is_replacement { + return PossibleLatestEvent::NoUnsupportedMessageLikeType; + } else { + return PossibleLatestEvent::YesRoomMessage(message); + } + } + + return PossibleLatestEvent::YesRoomMessage(message); } AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::UnstablePollStart(poll)) => { PossibleLatestEvent::YesPoll(poll) } + AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::CallInvite(invite)) => { + PossibleLatestEvent::YesCallInvite(invite) + } + // Encrypted events are not suitable AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::RoomEncrypted(_)) => { PossibleLatestEvent::NoEncrypted @@ -225,10 +254,15 @@ mod tests { use matrix_sdk_common::deserialized_responses::SyncTimelineEvent; use ruma::{ events::{ + call::{ + invite::{CallInviteEventContent, SyncCallInviteEvent}, + SessionDescription, + }, poll::unstable_start::{ NewUnstablePollStartEventContent, SyncUnstablePollStartEvent, UnstablePollAnswer, UnstablePollStartContentBlock, }, + relation::Replacement, room::{ encrypted::{ EncryptedEventScheme, OlmV1Curve25519AesSha2Content, RoomEncryptedEventContent, @@ -236,7 +270,7 @@ mod tests { }, message::{ ImageMessageEventContent, MessageType, RedactedRoomMessageEventContent, - RoomMessageEventContent, SyncRoomMessageEvent, + Relation, RoomMessageEventContent, SyncRoomMessageEvent, }, topic::{RoomTopicEventContent, SyncRoomTopicEvent}, ImageInfo, MediaSource, @@ -249,7 +283,7 @@ mod tests { }, owned_event_id, owned_mxc_uri, owned_user_id, serde::Raw, - MilliSecondsSinceUnixEpoch, UInt, + MilliSecondsSinceUnixEpoch, UInt, VoipVersionId, }; use serde_json::json; @@ -302,6 +336,28 @@ mod tests { assert_eq!(m.content.poll_start().question.text, "do you like rust?"); } + #[test] + fn call_invites_are_suitable() { + let event = AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::CallInvite( + SyncCallInviteEvent::Original(OriginalSyncMessageLikeEvent { + content: CallInviteEventContent::new( + "call_id".into(), + UInt::new(123).unwrap(), + SessionDescription::new("".into(), "".into()), + VoipVersionId::V1, + ), + event_id: owned_event_id!("$1"), + sender: owned_user_id!("@a:b.c"), + origin_server_ts: MilliSecondsSinceUnixEpoch(UInt::new(2123).unwrap()), + unsigned: MessageLikeUnsigned::new(), + }), + )); + assert_let!( + PossibleLatestEvent::YesCallInvite(SyncMessageLikeEvent::Original(_)) = + is_suitable_for_latest_event(&event) + ); + } + #[test] fn different_types_of_messagelike_are_unsuitable() { let event = AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::Sticker( @@ -391,6 +447,30 @@ mod tests { ); } + #[test] + fn replacement_events_are_unsuitable() { + let mut event_content = RoomMessageEventContent::text_plain("Bye bye, world!"); + event_content.relates_to = Some(Relation::Replacement(Replacement::new( + owned_event_id!("$1"), + RoomMessageEventContent::text_plain("Hello, world!").into(), + ))); + + let event = AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::RoomMessage( + SyncRoomMessageEvent::Original(OriginalSyncMessageLikeEvent { + content: event_content, + event_id: owned_event_id!("$2"), + sender: owned_user_id!("@a:b.c"), + origin_server_ts: MilliSecondsSinceUnixEpoch(UInt::new(2123).unwrap()), + 
unsigned: MessageLikeUnsigned::new(), + }), + )); + + assert_matches!( + is_suitable_for_latest_event(&event), + PossibleLatestEvent::NoUnsupportedMessageLikeType + ); + } + #[test] fn deserialize_latest_event() { #[derive(Debug, serde::Serialize, serde::Deserialize)] diff --git a/crates/matrix-sdk-base/src/lib.rs b/crates/matrix-sdk-base/src/lib.rs index a5ba2324a24..e382c78234d 100644 --- a/crates/matrix-sdk-base/src/lib.rs +++ b/crates/matrix-sdk-base/src/lib.rs @@ -38,6 +38,8 @@ mod sliding_sync; pub mod store; pub mod sync; +#[cfg(any(test, feature = "testing"))] +mod test_utils; mod utils; #[cfg(feature = "uniffi")] diff --git a/crates/matrix-sdk-base/src/rooms/members.rs b/crates/matrix-sdk-base/src/rooms/members.rs index 1e1dd53299b..9c369e0ed39 100644 --- a/crates/matrix-sdk-base/src/rooms/members.rs +++ b/crates/matrix-sdk-base/src/rooms/members.rs @@ -122,7 +122,8 @@ impl RoomMember { /// Get the normalized power level of this member. /// /// The normalized power level depends on the maximum power level that can - /// be found in a certain room, it's always in the range of 0-100. + /// be found in a certain room, positive values are always in the range of + /// 0-100. pub fn normalized_power_level(&self) -> i64 { if self.max_power_level > 0 { (self.power_level() * 100) / self.max_power_level diff --git a/crates/matrix-sdk-base/src/sliding_sync.rs b/crates/matrix-sdk-base/src/sliding_sync.rs index 93046638183..f66a01a8009 100644 --- a/crates/matrix-sdk-base/src/sliding_sync.rs +++ b/crates/matrix-sdk-base/src/sliding_sync.rs @@ -585,8 +585,10 @@ async fn cache_latest_events( for event in events.iter().rev() { if let Ok(timeline_event) = event.event.deserialize() { match is_suitable_for_latest_event(&timeline_event) { - PossibleLatestEvent::YesRoomMessage(_) | PossibleLatestEvent::YesPoll(_) => { - // m.room.message or m.poll.start - we found one! Store it. + PossibleLatestEvent::YesRoomMessage(_) + | PossibleLatestEvent::YesPoll(_) + | PossibleLatestEvent::YesCallInvite(_) => { + // We found a suitable latest event. Store it. // In order to make the latest event fast to read, we want to keep the // associated sender in cache. 
This is a best-effort to gather enough @@ -728,7 +730,7 @@ mod tests { use matrix_sdk_test::async_test; use ruma::{ api::client::sync::sync_events::{v4, UnreadNotificationsCount}, - assign, device_id, event_id, + assign, event_id, events::{ direct::DirectEventContent, room::{ @@ -747,11 +749,13 @@ mod tests { use serde_json::json; use super::cache_latest_events; - use crate::{store::MemoryStore, BaseClient, Room, RoomState, SessionMeta}; + use crate::{ + store::MemoryStore, test_utils::logged_in_base_client, BaseClient, Room, RoomState, + }; #[async_test] async fn test_notification_count_set() { - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let mut response = v4::Response::new("42".to_owned()); let room_id = room_id!("!room:example.org"); @@ -781,7 +785,7 @@ mod tests { #[async_test] async fn can_process_empty_sliding_sync_response() { - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let empty_response = v4::Response::new("5".to_owned()); client.process_sliding_sync(&empty_response, &()).await.expect("Failed to process sync"); } @@ -789,7 +793,7 @@ mod tests { #[async_test] async fn room_with_unspecified_state_is_added_to_client_and_joined_list() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); // When I send sliding sync response containing a room (with identifiable data @@ -815,7 +819,7 @@ mod tests { #[async_test] async fn room_name_is_found_when_processing_sliding_sync_response() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); // When I send sliding sync response containing a room with a name @@ -839,7 +843,7 @@ mod tests { #[async_test] async fn invited_room_name_is_found_when_processing_sliding_sync_response() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let user_id = user_id!("@w:e.uk"); @@ -865,7 +869,7 @@ mod tests { #[async_test] async fn left_a_room_from_required_state_event() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let user_id = user_id!("@u:e.uk"); @@ -895,7 +899,7 @@ mod tests { #[async_test] async fn left_a_room_from_timeline_state_event() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let user_id = user_id!("@u:e.uk"); @@ -921,7 +925,7 @@ mod tests { // See https://github.com/matrix-org/matrix-rust-sdk/issues/1834 // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let user_id = user_id!("@u:e.uk"); @@ -958,7 +962,7 @@ mod tests { let user_b_id = user_id!("@b:e.uk"); // Given we have a DM with B, who is joined - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; create_dm(&client, room_id, user_a_id, user_b_id, MembershipState::Join).await; // (Sanity: B is a direct target, and is in Join state) @@ -983,7 +987,7 @@ mod tests { let user_b_id = user_id!("@b:e.uk"); // Given I have invited B to a DM - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; create_dm(&client, 
room_id, user_a_id, user_b_id, MembershipState::Invite).await; // (Sanity: B is a direct target, and is in Invite state) @@ -1007,7 +1011,7 @@ mod tests { let user_b_id = user_id!("@b:bar.org"); // Given we have a DM with B, who is joined - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; create_dm(&client, room_id, user_a_id, user_b_id, MembershipState::Join).await; // (Sanity: A is in Join state) @@ -1031,7 +1035,7 @@ mod tests { let user_b_id = user_id!("@b:bar.org"); // Given we have a DM with B, who is joined - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; create_dm(&client, room_id, user_a_id, user_b_id, MembershipState::Invite).await; // (Sanity: A is in Join state) @@ -1051,7 +1055,7 @@ mod tests { #[async_test] async fn avatar_is_found_when_processing_sliding_sync_response() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); // When I send sliding sync response containing a room with an avatar @@ -1075,7 +1079,7 @@ mod tests { #[async_test] async fn avatar_can_be_unset_when_processing_sliding_sync_response() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); // Set the avatar. @@ -1131,7 +1135,7 @@ mod tests { #[async_test] async fn avatar_is_found_from_required_state_when_processing_sliding_sync_response() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let user_id = user_id!("@u:e.uk"); @@ -1151,7 +1155,7 @@ mod tests { #[async_test] async fn invitation_room_is_added_to_client_and_invite_list() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let user_id = user_id!("@u:e.uk"); @@ -1175,7 +1179,7 @@ mod tests { #[async_test] async fn avatar_is_found_in_invitation_room_when_processing_sliding_sync_response() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let user_id = user_id!("@u:e.uk"); @@ -1196,7 +1200,7 @@ mod tests { #[async_test] async fn canonical_alias_is_found_in_invitation_room_when_processing_sliding_sync_response() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let user_id = user_id!("@u:e.uk"); let room_alias_id = room_alias_id!("#myroom:e.uk"); @@ -1215,7 +1219,7 @@ mod tests { #[async_test] async fn display_name_from_sliding_sync_overrides_alias() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let user_id = user_id!("@u:e.uk"); let room_alias_id = room_alias_id!("#myroom:e.uk"); @@ -1238,7 +1242,7 @@ mod tests { #[async_test] async fn last_event_from_sliding_sync_is_cached() { // Given a logged-in client - let client = logged_in_client().await; + let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let event_a = json!({ "sender":"@alice:example.com", @@ -1272,7 +1276,7 @@ mod tests { #[async_test] async fn cached_latest_event_can_be_redacted() { // Given a logged-in client - let client = logged_in_client().await; + 
let client = logged_in_base_client(None).await; let room_id = room_id!("!r:e.uk"); let event_a = json!({ "sender": "@alice:example.com", @@ -1726,18 +1730,6 @@ mod tests { .push(make_global_account_data_event(DirectEventContent(direct_content))); } - async fn logged_in_client() -> BaseClient { - let client = BaseClient::new(); - client - .set_session_meta(SessionMeta { - user_id: user_id!("@u:e.uk").to_owned(), - device_id: device_id!("XYZ").to_owned(), - }) - .await - .expect("Failed to set session meta"); - client - } - async fn response_with_room(room_id: &RoomId, room: v4::SlidingSyncRoom) -> v4::Response { let mut response = v4::Response::new("5".to_owned()); response.rooms.insert(room_id.to_owned(), room); diff --git a/crates/matrix-sdk-base/src/store/traits.rs b/crates/matrix-sdk-base/src/store/traits.rs index 89caf9f697c..8d294db7e15 100644 --- a/crates/matrix-sdk-base/src/store/traits.rs +++ b/crates/matrix-sdk-base/src/store/traits.rs @@ -302,7 +302,8 @@ pub trait StateStore: AsyncTraitDeps { /// * `key` - The key to fetch data for async fn get_custom_value(&self, key: &[u8]) -> Result>, Self::Error>; - /// Put arbitrary data into the custom store + /// Put arbitrary data into the custom store and return the data previously + /// stored /// /// # Arguments /// @@ -315,6 +316,27 @@ pub trait StateStore: AsyncTraitDeps { value: Vec, ) -> Result>, Self::Error>; + /// Put arbitrary data into the custom store, without attempting to read any + /// previous data + /// + /// Optimization over `set_custom_value` for stores that would perform + /// better without the extra read when the caller does not need the previous + /// data returned. Otherwise this just wraps around `set_custom_value` and + /// discards the result. + /// + /// # Arguments + /// + /// * `key` - The key to insert data into + /// + /// * `value` - The value to insert + async fn set_custom_value_no_read( + &self, + key: &[u8], + value: Vec, + ) -> Result<(), Self::Error> { + self.set_custom_value(key, value).await.map(|_| ()) + } + /// Remove arbitrary data from the custom store and return it if existed /// /// # Arguments diff --git a/crates/matrix-sdk-base/src/test_utils.rs b/crates/matrix-sdk-base/src/test_utils.rs new file mode 100644 index 00000000000..f59f976b7e9 --- /dev/null +++ b/crates/matrix-sdk-base/src/test_utils.rs @@ -0,0 +1,34 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Testing utilities - DO NOT USE IN PRODUCTION. + +#![allow(dead_code)] + +use ruma::{owned_user_id, UserId}; + +use crate::{BaseClient, SessionMeta}; + +/// Create a [`BaseClient`] with the given user id, if provided, or a hardcoded +/// one otherwise. 
+pub(crate) async fn logged_in_base_client(user_id: Option<&UserId>) -> BaseClient { + let client = BaseClient::new(); + let user_id = + user_id.map(|user_id| user_id.to_owned()).unwrap_or_else(|| owned_user_id!("@u:e.uk")); + client + .set_session_meta(SessionMeta { user_id: user_id.to_owned(), device_id: "FOOBAR".into() }) + .await + .expect("set_session_meta failed!"); + client +} diff --git a/crates/matrix-sdk-common/src/executor.rs b/crates/matrix-sdk-common/src/executor.rs index a8eecb5cd1f..b1cb1a7bf13 100644 --- a/crates/matrix-sdk-common/src/executor.rs +++ b/crates/matrix-sdk-common/src/executor.rs @@ -15,8 +15,6 @@ //! Abstraction over an executor so we can spawn tasks under WASM the same way //! we do usually. -#[cfg(target_arch = "wasm32")] -pub use std::convert::Infallible as JoinError; #[cfg(target_arch = "wasm32")] use std::{ future::Future, @@ -25,7 +23,12 @@ use std::{ }; #[cfg(target_arch = "wasm32")] -use futures_util::{future::RemoteHandle, FutureExt}; +pub use futures_util::future::Aborted as JoinError; +#[cfg(target_arch = "wasm32")] +use futures_util::{ + future::{AbortHandle, Abortable, RemoteHandle}, + FutureExt, +}; #[cfg(not(target_arch = "wasm32"))] pub use tokio::task::{spawn, JoinError, JoinHandle}; @@ -34,16 +37,31 @@ pub fn spawn(future: F) -> JoinHandle where F: Future + 'static, { - let (fut, handle) = future.remote_handle(); - wasm_bindgen_futures::spawn_local(fut); + let (future, remote_handle) = future.remote_handle(); + let (abort_handle, abort_registration) = AbortHandle::new_pair(); + let future = Abortable::new(future, abort_registration); - JoinHandle { handle } + wasm_bindgen_futures::spawn_local(async { + // Poll the future, and ignore the result (either it's `Ok(())`, or it's + // `Err(Aborted)`). + let _ = future.await; + }); + + JoinHandle { remote_handle, abort_handle } } #[cfg(target_arch = "wasm32")] #[derive(Debug)] pub struct JoinHandle { - handle: RemoteHandle, + remote_handle: RemoteHandle, + abort_handle: AbortHandle, +} + +#[cfg(target_arch = "wasm32")] +impl JoinHandle { + pub fn abort(&self) { + self.abort_handle.abort(); + } } #[cfg(target_arch = "wasm32")] @@ -51,6 +69,37 @@ impl Future for JoinHandle { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.handle).poll(cx).map(Ok) + if self.abort_handle.is_aborted() { + // The future has been aborted. It is not possible to poll it again. + Poll::Ready(Err(JoinError)) + } else { + Pin::new(&mut self.remote_handle).poll(cx).map(Ok) + } + } +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use matrix_sdk_test::async_test; + + use super::spawn; + + #[async_test] + async fn test_spawn() { + let future = async { 42 }; + let join_handle = spawn(future); + + assert_matches!(join_handle.await, Ok(42)); + } + + #[async_test] + async fn test_abort() { + let future = async { 42 }; + let join_handle = spawn(future); + + join_handle.abort(); + + assert!(join_handle.await.is_err()); } } diff --git a/crates/matrix-sdk-common/src/ring_buffer.rs b/crates/matrix-sdk-common/src/ring_buffer.rs index 89efb621807..8b3bbcff15a 100644 --- a/crates/matrix-sdk-common/src/ring_buffer.rs +++ b/crates/matrix-sdk-common/src/ring_buffer.rs @@ -21,7 +21,7 @@ use std::{ ops::RangeBounds, }; -use serde::{self, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; /// A simple fixed-size ring buffer implementation. 
/// diff --git a/crates/matrix-sdk-crypto/CHANGELOG.md b/crates/matrix-sdk-crypto/CHANGELOG.md index f06ab1d3b6b..08a968da732 100644 --- a/crates/matrix-sdk-crypto/CHANGELOG.md +++ b/crates/matrix-sdk-crypto/CHANGELOG.md @@ -2,11 +2,21 @@ Breaking changes: +- Rename the `OlmMachine::invalidate_group_session` method to + `OlmMachine::discard_room_key` + +- Move `OlmMachine::export_room_keys` to `matrix_sdk_crypto::store::Store`. + (Call it with `olm_machine.store().export_room_keys(...)`.) + - Add new `dehydrated` property to `olm::account::PickledAccount`. ([#3164](https://github.com/matrix-org/matrix-rust-sdk/pull/3164)) Additions: +- When Olm message decryption fails, report the error code(s) from the failure. + ([#3212](https://github.com/matrix-org/matrix-rust-sdk/pull/3212)) + - Expose new methods `OlmMachine::set_room_settings` and `OlmMachine::get_room_settings`. ([#3042](https://github.com/matrix-org/matrix-rust-sdk/pull/3042)) @@ -23,6 +33,12 @@ Additions: to encrypt an event to a specific device. ([#3091](https://github.com/matrix-org/matrix-rust-sdk/pull/3091)) +- Add new API `store::Store::export_room_keys_stream` that provides room + keys on demand. + +- Include event timestamps on logs from event decryption. + ([#3194](https://github.com/matrix-org/matrix-rust-sdk/pull/3194)) + # 0.7.0 - Add method to mark a list of inbound group sessions as backed up: diff --git a/crates/matrix-sdk-crypto/Cargo.toml b/crates/matrix-sdk-crypto/Cargo.toml index b579e64d88d..16def859cc1 100644 --- a/crates/matrix-sdk-crypto/Cargo.toml +++ b/crates/matrix-sdk-crypto/Cargo.toml @@ -22,6 +22,7 @@ qrcode = ["dep:matrix-sdk-qrcode"] message-ids = ["dep:ulid"] experimental-algorithms = [] uniffi = ["dep:uniffi"] +_disable-minimum-rotation-period-ms = [] # Testing helpers for implementations based upon this testing = ["dep:http"] @@ -52,6 +53,7 @@ serde = { workspace = true, features = ["derive", "rc"] } serde_json = { workspace = true } sha2 = { workspace = true } subtle = "2.5.0" +time = { version = "0.3.34", features = ["formatting"] } tokio-stream = { workspace = true, features = ["sync"] } tokio = { workspace = true } thiserror = { workspace = true } diff --git a/crates/matrix-sdk-crypto/README.md b/crates/matrix-sdk-crypto/README.md index e46418915ad..02119dea068 100644 --- a/crates/matrix-sdk-crypto/README.md +++ b/crates/matrix-sdk-crypto/README.md @@ -70,4 +70,6 @@ The following crate feature flags are available: * `qrcode`: Enbles QRcode generation and reading code -* `testing`: provides facilities and functions for tests, in particular for integration testing store implementations. ATTENTION: do not ever use outside of tests, we do not provide any stability warantees on these, these are merely helpers. If you find you _need_ any function provided here outside of tests, please open a Github Issue and inform us about your use case for us to consider. +* `testing`: Provides facilities and functions for tests, in particular for integration testing store implementations. ATTENTION: do not ever use outside of tests, we do not provide any stability guarantees on these, these are merely helpers. If you find you _need_ any function provided here outside of tests, please open a Github Issue and inform us about your use case for us to consider. + +* `_disable-minimum-rotation-period-ms`: Do not use except for testing. Disables the floor on the rotation period of room keys.
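For illustration only (not part of the patch): a minimal sketch of how call sites change under the renames described in the changelog above, reusing the doc example from `key_export.rs`. `invalidate_group_session` becomes `discard_room_key`, and `export_room_keys` is now reached through `OlmMachine::store()`. The surrounding `example` function is just scaffolding, not an API from the crate.

```rust
use matrix_sdk_crypto::{encrypt_room_key_export, OlmMachine};
use ruma::{device_id, room_id, user_id};

async fn example() {
    let machine =
        OlmMachine::new(user_id!("@alice:example.org"), device_id!("DEVICEID")).await;
    let room_id = room_id!("!test:localhost");

    // Renamed: was `invalidate_group_session`. Forces the outbound room key for
    // this room to be rotated before the next message is sent.
    machine.discard_room_key(room_id).await.unwrap();

    // Moved: room key export now lives on the store.
    let exported_keys =
        machine.store().export_room_keys(|s| s.room_id() == room_id).await.unwrap();
    let _encrypted_export = encrypt_room_key_export(&exported_keys, "1234", 1);
}
```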
diff --git a/crates/matrix-sdk-crypto/src/backups/keys/decryption.rs b/crates/matrix-sdk-crypto/src/backups/keys/decryption.rs index 739cb92d945..4b0cca6509a 100644 --- a/crates/matrix-sdk-crypto/src/backups/keys/decryption.rs +++ b/crates/matrix-sdk-crypto/src/backups/keys/decryption.rs @@ -17,7 +17,6 @@ use std::{ ops::DerefMut, }; -use bs58; use ruma::api::client::backup::EncryptedSessionData; use thiserror::Error; use vodozemac::Curve25519PublicKey; diff --git a/crates/matrix-sdk-crypto/src/error.rs b/crates/matrix-sdk-crypto/src/error.rs index 8400cd7bf5f..3e4e0bcf836 100644 --- a/crates/matrix-sdk-crypto/src/error.rs +++ b/crates/matrix-sdk-crypto/src/error.rs @@ -38,10 +38,6 @@ pub enum OlmError { #[error(transparent)] JsonError(#[from] SerdeError), - /// The event could not have been decrypted. - #[error(transparent)] - Decryption(#[from] vodozemac::olm::DecryptionError), - /// The received room key couldn't be converted into a valid Megolm session. #[error(transparent)] SessionCreation(#[from] SessionCreationError), diff --git a/crates/matrix-sdk-crypto/src/file_encryption/key_export.rs b/crates/matrix-sdk-crypto/src/file_encryption/key_export.rs index 52202bb50ac..ed7f1740afc 100644 --- a/crates/matrix-sdk-crypto/src/file_encryption/key_export.rs +++ b/crates/matrix-sdk-crypto/src/file_encryption/key_export.rs @@ -130,7 +130,7 @@ pub fn decrypt_room_key_export( /// # async { /// # let machine = OlmMachine::new(&alice, device_id!("DEVICEID")).await; /// let room_id = room_id!("!test:localhost"); -/// let exported_keys = machine.export_room_keys(|s| s.room_id() == room_id).await.unwrap(); +/// let exported_keys = machine.store().export_room_keys(|s| s.room_id() == room_id).await.unwrap(); /// let encrypted_export = encrypt_room_key_export(&exported_keys, "1234", 1); /// # }; /// ``` @@ -293,7 +293,7 @@ mod tests { let room_id = room_id!("!test:localhost"); machine.create_outbound_group_session_with_defaults_test_helper(room_id).await.unwrap(); - let export = machine.export_room_keys(|s| s.room_id() == room_id).await.unwrap(); + let export = machine.store().export_room_keys(|s| s.room_id() == room_id).await.unwrap(); assert!(!export.is_empty()); diff --git a/crates/matrix-sdk-crypto/src/machine.rs b/crates/matrix-sdk-crypto/src/machine.rs index 33584a5c4f3..f597863b09e 100644 --- a/crates/matrix-sdk-crypto/src/machine.rs +++ b/crates/matrix-sdk-crypto/src/machine.rs @@ -88,6 +88,7 @@ use crate::{ }, EventEncryptionAlgorithm, Signatures, }, + utilities::timestamp_to_iso8601, verification::{Verification, VerificationMachine, VerificationRequest}, CrossSigningKeyExport, CryptoStoreError, KeysQueryRequest, LocalTrust, ReadOnlyDevice, RoomKeyImportResult, SignatureError, ToDeviceRequest, @@ -894,12 +895,17 @@ impl OlmMachine { self.inner.group_session_manager.encrypt(room_id, event_type, content).await } - /// Invalidate the currently active outbound group session for the given - /// room. + /// Forces the currently active room key, which is used to encrypt messages, + /// to be rotated. + /// + /// A new room key will be created and shared with all the room members the + /// next time a message is sent. You don't have to call this method; room + /// keys will be rotated automatically when necessary. This method is + /// still useful for debugging purposes. /// /// Returns true if a session was invalidated, false if there was no session /// to invalidate. 
- pub async fn invalidate_group_session(&self, room_id: &RoomId) -> StoreResult<bool> { + pub async fn discard_room_key(&self, room_id: &RoomId) -> StoreResult<bool> { self.inner.group_session_manager.invalidate_group_session(room_id).await } @@ -1529,7 +1535,7 @@ impl OlmMachine { /// * `event` - The event that should be decrypted. /// /// * `room_id` - The ID of the room where the event was sent to. - #[instrument(skip_all, fields(?room_id, event_id, sender, algorithm, session_id, sender_key))] + #[instrument(skip_all, fields(?room_id, event_id, origin_server_ts, sender, algorithm, session_id, sender_key))] pub async fn decrypt_room_event( &self, event: &Raw<EncryptedEvent>, @@ -1540,6 +1546,11 @@ impl OlmMachine { tracing::Span::current() .record("sender", debug(&event.sender)) .record("event_id", debug(&event.event_id)) + .record( + "origin_server_ts", + timestamp_to_iso8601(event.origin_server_ts) + .unwrap_or_else(|| "".to_owned()), + ) .record("algorithm", debug(event.content.algorithm())); let content: SupportedEventEncryptionSchemes<'_> = match &event.content.scheme { @@ -1802,45 +1813,6 @@ impl OlmMachine { self.store().import_room_keys(exported_keys, from_backup, progress_listener).await } - /// Export the keys that match the given predicate. - /// - /// # Arguments - /// - /// * `predicate` - A closure that will be called for every known - /// `InboundGroupSession`, which represents a room key. If the closure - /// returns `true` the `InboundGroupSession` will be included in the export, - /// if the closure returns `false` it will not be included. - /// - /// # Examples - /// - /// ```no_run - /// # use matrix_sdk_crypto::{OlmMachine, encrypt_room_key_export}; - /// # use ruma::{device_id, user_id, room_id}; - /// # let alice = user_id!("@alice:example.org"); - /// # async { - /// # let machine = OlmMachine::new(&alice, device_id!("DEVICEID")).await; - /// let room_id = room_id!("!test:localhost"); - /// let exported_keys = machine.export_room_keys(|s| s.room_id() == room_id).await.unwrap(); - /// let encrypted_export = encrypt_room_key_export(&exported_keys, "1234", 1); - /// # }; - /// ``` - pub async fn export_room_keys( - &self, - predicate: impl FnMut(&InboundGroupSession) -> bool, - ) -> StoreResult<Vec<ExportedRoomKey>> { - let mut exported = Vec::new(); - - let mut sessions = self.store().get_inbound_group_sessions().await?; - sessions.retain(predicate); - - for session in sessions { - let export = session.export().await; - exported.push(export); - } - - Ok(exported) - } - /// Get the status of the private cross signing keys. /// /// This can be used to check which private cross signing keys we have @@ -1955,6 +1927,8 @@ impl OlmMachine { None => 0, }; + tracing::debug!("Initialising crypto store generation at {}", gen); + self.inner .store .set_custom_value(Self::CURRENT_GENERATION_STORE_KEY, gen.to_le_bytes().to_vec()) @@ -1967,19 +1941,32 @@ impl OlmMachine { /// If needs be, update the local and on-disk crypto store generation. /// - /// Returns true whether another user has modified the internal generation - /// counter, and as such we've incremented and updated it in the - /// database. - /// /// ## Requirements /// /// - This assumes that `initialize_crypto_store_generation` has been called /// beforehand. /// - This requires that the crypto store lock has been acquired. - pub async fn maintain_crypto_store_generation( - &self, + /// + /// # Arguments + /// + /// * `generation` - The in-memory generation counter (or rather, the + /// `Mutex` wrapping it). 
This defines the "expected" generation on entry, + /// and, if we determine an update is needed, is updated to hold the "new" + /// generation. + /// + /// # Returns + /// + /// A tuple containing: + /// + /// * A `bool`, set to `true` if another process has updated the generation + /// number in the `Store` since our expected value, and as such we've + /// incremented and updated it in the database. Otherwise, `false`. + /// + /// * The (possibly updated) generation counter. + pub async fn maintain_crypto_store_generation<'a>( + &'a self, generation: &Mutex>, - ) -> StoreResult { + ) -> StoreResult<(bool, u64)> { let mut gen_guard = generation.lock().await; // The database value must be there: @@ -2001,10 +1988,10 @@ impl OlmMachine { CryptoStoreError::InvalidLockGeneration("invalid format".to_owned()) })?); - let expected_gen = match gen_guard.as_ref() { + let new_gen = match gen_guard.as_ref() { Some(expected_gen) => { if actual_gen == *expected_gen { - return Ok(false); + return Ok((false, actual_gen)); } // Increment the biggest, and store it everywhere. actual_gen.max(*expected_gen).wrapping_add(1) @@ -2020,22 +2007,19 @@ impl OlmMachine { "Crypto store generation mismatch: previously known was {:?}, actual is {:?}, next is {}", *gen_guard, actual_gen, - expected_gen + new_gen ); // Update known value. - *gen_guard = Some(expected_gen); + *gen_guard = Some(new_gen); // Update value in database. self.inner .store - .set_custom_value( - Self::CURRENT_GENERATION_STORE_KEY, - expected_gen.to_le_bytes().to_vec(), - ) + .set_custom_value(Self::CURRENT_GENERATION_STORE_KEY, new_gen.to_le_bytes().to_vec()) .await?; - Ok(true) + Ok((true, new_gen)) } /// Manage dehydrated devices. @@ -2351,7 +2335,7 @@ pub(crate) mod tests { (machine, otk) } - async fn get_machine_pair( + pub async fn get_machine_pair( alice: &UserId, bob: &UserId, use_fallback_key: bool, @@ -2515,7 +2499,7 @@ pub(crate) mod tests { machine.create_outbound_group_session_with_defaults_test_helper(room_id).await.unwrap(); assert!(machine.inner.group_session_manager.get_outbound_group_session(room_id).is_some()); - machine.invalidate_group_session(room_id).await.unwrap(); + machine.discard_room_key(room_id).await.unwrap(); assert!(machine .inner diff --git a/crates/matrix-sdk-crypto/src/olm/account.rs b/crates/matrix-sdk-crypto/src/olm/account.rs index ae77a6f8f69..39537aa5cd9 100644 --- a/crates/matrix-sdk-crypto/src/olm/account.rs +++ b/crates/matrix-sdk-crypto/src/olm/account.rs @@ -1159,32 +1159,33 @@ impl Account { match message { OlmMessage::Normal(_) => { - let session_ids = if let Some(sessions) = existing_sessions { + let mut errors_by_olm_session = Vec::new(); + + if let Some(sessions) = existing_sessions { let sessions = &mut *sessions.lock().await; // Try to decrypt the message using each Session we share with the // given curve25519 sender key. for session in sessions.iter_mut() { - if let Ok(p) = session.decrypt(message).await { - // success! - return Ok((SessionType::Existing(session.clone()), p)); - } else { - // An error here is completely normal, after all we don't know - // which session was used to encrypt a message. We will log a - // warning if no session was able to decrypt the message. - continue; + match session.decrypt(message).await { + Ok(p) => { + // success! + return Ok((SessionType::Existing(session.clone()), p)); + } + + Err(e) => { + // An error here is completely normal, after all we don't know + // which session was used to encrypt a message. 
+ // We keep hold of the error, so that if *all* sessions fail to + // decrypt, we can log something useful. + errors_by_olm_session.push((session.session_id().to_owned(), e)); + } } } - - // decryption wasn't successful with any of the sessions. Collect a list of - // session IDs to log. - sessions.iter().map(|s| s.session_id().to_owned()).collect() - } else { - vec![] - }; + } warn!( - ?session_ids, + ?errors_by_olm_session, "Failed to decrypt a non-pre-key message with all available sessions" ); Err(OlmError::SessionWedged(sender.to_owned(), sender_key)) diff --git a/crates/matrix-sdk-crypto/src/olm/group_sessions/outbound.rs b/crates/matrix-sdk-crypto/src/olm/group_sessions/outbound.rs index b75fd08201e..c78936e862a 100644 --- a/crates/matrix-sdk-crypto/src/olm/group_sessions/outbound.rs +++ b/crates/matrix-sdk-crypto/src/olm/group_sessions/outbound.rs @@ -59,7 +59,10 @@ use crate::{ ReadOnlyDevice, ToDeviceRequest, }; -const ROTATION_PERIOD: Duration = Duration::from_millis(604800000); +const ONE_HOUR: Duration = Duration::from_secs(60 * 60); +const ONE_WEEK: Duration = Duration::from_secs(60 * 60 * 24 * 7); + +const ROTATION_PERIOD: Duration = ONE_WEEK; const ROTATION_MESSAGES: u64 = 100; #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -415,15 +418,27 @@ impl OutboundGroupSession { fn elapsed(&self) -> bool { let creation_time = Duration::from_secs(self.creation_time.get().into()); let now = Duration::from_secs(SecondsSinceUnixEpoch::now().get().into()); - - // Since the encryption settings are provided by users and not - // checked someone could set a really low rotation period so - // clamp it to an hour. now.checked_sub(creation_time) - .map(|elapsed| elapsed >= max(self.settings.rotation_period, Duration::from_secs(3600))) + .map(|elapsed| elapsed >= self.safe_rotation_period()) .unwrap_or(true) } + /// Returns the rotation_period_ms that was set for this session, clamped + /// to be no less than one hour. + /// + /// This is to prevent a malicious or careless user causing sessions to be + /// rotated very frequently. + /// + /// The feature flag `_disable-minimum-rotation-period-ms` can + /// be used to prevent this behaviour (which can be useful for tests). + fn safe_rotation_period(&self) -> Duration { + if cfg!(feature = "_disable-minimum-rotation-period-ms") { + self.settings.rotation_period + } else { + max(self.settings.rotation_period, ONE_HOUR) + } + } + /// Check if the session has expired and if it should be rotated. 
/// /// A session will expire after some time or if enough messages have been @@ -735,20 +750,16 @@ pub struct PickledOutboundGroupSession { #[cfg(test)] mod tests { - use std::{sync::atomic::Ordering, time::Duration}; + use std::time::Duration; - use matrix_sdk_test::async_test; use ruma::{ - device_id, events::room::{ encryption::RoomEncryptionEventContent, history_visibility::HistoryVisibility, - message::RoomMessageEventContent, }, - room_id, uint, user_id, EventEncryptionAlgorithm, + uint, EventEncryptionAlgorithm, }; use super::{EncryptionSettings, ROTATION_MESSAGES, ROTATION_PERIOD}; - use crate::{Account, MegolmError}; #[test] fn test_encryption_settings_conversion() { @@ -768,78 +779,208 @@ mod tests { assert_eq!(settings.rotation_period_msgs, 500); } - #[async_test] #[cfg(any(target_os = "linux", target_os = "macos", target_arch = "wasm32"))] - async fn test_expiration() -> Result<(), MegolmError> { - use ruma::{serde::Raw, SecondsSinceUnixEpoch}; - - let settings = EncryptionSettings { rotation_period_msgs: 1, ..Default::default() }; - - let account = - Account::with_device_id(user_id!("@alice:example.org"), device_id!("DEVICEID")) - .static_data; - let (session, _) = account - .create_group_session_pair(room_id!("!test_room:example.org"), settings) - .await - .unwrap(); - - assert!(!session.expired()); - let _ = session - .encrypt( - "m.room.message", - &Raw::new(&RoomMessageEventContent::text_plain("Test message"))?.cast(), - ) - .await; - assert!(session.expired()); + mod expiration { + use std::{sync::atomic::Ordering, time::Duration}; - let settings = EncryptionSettings { - rotation_period: Duration::from_millis(100), - ..Default::default() + use matrix_sdk_test::async_test; + use ruma::{ + device_id, events::room::message::RoomMessageEventContent, room_id, serde::Raw, uint, + user_id, SecondsSinceUnixEpoch, }; - let (mut session, _) = account - .create_group_session_pair(room_id!("!test_room:example.org"), settings) - .await - .unwrap(); + use crate::{olm::OutboundGroupSession, Account, EncryptionSettings, MegolmError}; - assert!(!session.expired()); + const TWO_HOURS: Duration = Duration::from_secs(60 * 60 * 2); - let now = SecondsSinceUnixEpoch::now(); - session.creation_time = SecondsSinceUnixEpoch(now.get() - uint!(3600)); - assert!(session.expired()); + #[async_test] + async fn session_is_not_expired_if_no_messages_sent_and_no_time_passed() { + // Given a session that expires after one message + let session = create_session(EncryptionSettings { + rotation_period_msgs: 1, + ..Default::default() + }) + .await; - let settings = EncryptionSettings { rotation_period_msgs: 0, ..Default::default() }; + // When we send no messages at all - let (session, _) = account - .create_group_session_pair(room_id!("!test_room:example.org"), settings) - .await - .unwrap(); + // Then it is not expired + assert!(!session.expired()); + } - assert!(!session.expired()); + #[async_test] + async fn session_is_expired_if_we_rotate_every_message_and_one_was_sent( + ) -> Result<(), MegolmError> { + // Given a session that expires after one message + let session = create_session(EncryptionSettings { + rotation_period_msgs: 1, + ..Default::default() + }) + .await; - let _ = session - .encrypt( - "m.room.message", - &Raw::new(&RoomMessageEventContent::text_plain("Test message"))?.cast(), - ) + // When we send a message + let _ = session + .encrypt( + "m.room.message", + &Raw::new(&RoomMessageEventContent::text_plain("Test message"))?.cast(), + ) + .await; + + // Then the session is expired + 
assert!(session.expired()); + + Ok(()) + } + + #[async_test] + async fn session_with_rotation_period_is_not_expired_after_no_time() { + // Given a session with a 2h expiration + let session = create_session(EncryptionSettings { + rotation_period: TWO_HOURS, + ..Default::default() + }) + .await; + + // When we don't allow any time to pass + + // Then it is not expired + assert!(!session.expired()); + } + + #[async_test] + async fn session_is_expired_after_rotation_period() { + // Given a session with a 2h expiration + let mut session = create_session(EncryptionSettings { + rotation_period: TWO_HOURS, + ..Default::default() + }) + .await; + + // When 3 hours have passed + let now = SecondsSinceUnixEpoch::now(); + session.creation_time = SecondsSinceUnixEpoch(now.get() - uint!(10800)); + + // Then the session is expired + assert!(session.expired()); + } + + #[async_test] + #[cfg(not(feature = "_disable-minimum-rotation-period-ms"))] + async fn session_does_not_expire_under_one_hour_even_if_we_ask_for_shorter() { + // Given a session with a 100ms expiration + let mut session = create_session(EncryptionSettings { + rotation_period: Duration::from_millis(100), + ..Default::default() + }) + .await; + + // When less than an hour has passed + let now = SecondsSinceUnixEpoch::now(); + session.creation_time = SecondsSinceUnixEpoch(now.get() - uint!(1800)); + + // Then the session is not expired: we enforce a minimum of 1 hour + assert!(!session.expired()); + + // But when more than an hour has passed + session.creation_time = SecondsSinceUnixEpoch(now.get() - uint!(3601)); + + // Then the session is expired + assert!(session.expired()); + } + + #[async_test] + #[cfg(feature = "_disable-minimum-rotation-period-ms")] + async fn with_disable_minrotperiod_feature_sessions_can_expire_quickly() { + // Given a session with a 100ms expiration + let mut session = create_session(EncryptionSettings { + rotation_period: Duration::from_millis(100), + ..Default::default() + }) .await; - assert!(session.expired()); - let settings = EncryptionSettings { rotation_period_msgs: 100_000, ..Default::default() }; + // When less than an hour has passed + let now = SecondsSinceUnixEpoch::now(); + session.creation_time = SecondsSinceUnixEpoch(now.get() - uint!(1800)); + + // Then the session is expired: the feature flag has prevented us enforcing a + // minimum + assert!(session.expired()); + } + + #[async_test] + async fn session_with_zero_msgs_rotation_is_not_expired_initially() { + // Given a session that is supposed to expire after zero messages + let session = create_session(EncryptionSettings { + rotation_period_msgs: 0, + ..Default::default() + }) + .await; - let (session, _) = account - .create_group_session_pair(room_id!("!test_room:example.org"), settings) - .await - .unwrap(); + // When we send no messages - assert!(!session.expired()); - session.message_count.store(1000, Ordering::SeqCst); - assert!(!session.expired()); - session.message_count.store(9999, Ordering::SeqCst); - assert!(!session.expired()); - session.message_count.store(10_000, Ordering::SeqCst); - assert!(session.expired()); + // Then the session is not expired: we are protected against this nonsensical + // setup + assert!(!session.expired()); + } - Ok(()) + #[async_test] + async fn session_with_zero_msgs_rotation_expires_after_one_message( + ) -> Result<(), MegolmError> { + // Given a session that is supposed to expire after zero messages + let session = create_session(EncryptionSettings { + rotation_period_msgs: 0, + ..Default::default() + }) + 
.await; + + // When we send a message + let _ = session + .encrypt( + "m.room.message", + &Raw::new(&RoomMessageEventContent::text_plain("Test message"))?.cast(), + ) + .await; + + // Then the session is expired: we treated rotation_period_msgs=0 as if it were + // =1 + assert!(session.expired()); + + Ok(()) + } + + #[async_test] + async fn session_expires_after_10k_messages_even_if_we_ask_for_more() { + // Given we asked to expire after 100K messages + let session = create_session(EncryptionSettings { + rotation_period_msgs: 100_000, + ..Default::default() + }) + .await; + + // Sanity: it does not expire after <10K messages + assert!(!session.expired()); + session.message_count.store(1000, Ordering::SeqCst); + assert!(!session.expired()); + session.message_count.store(9999, Ordering::SeqCst); + assert!(!session.expired()); + + // When we have sent >= 10K messages + session.message_count.store(10_000, Ordering::SeqCst); + + // Then it is considered expired: we enforce a maximum of 10K messages before + // rotation. + assert!(session.expired()); + } + + async fn create_session(settings: EncryptionSettings) -> OutboundGroupSession { + let account = + Account::with_device_id(user_id!("@alice:example.org"), device_id!("DEVICEID")) + .static_data; + let (session, _) = account + .create_group_session_pair(room_id!("!test_room:example.org"), settings) + .await + .unwrap(); + session + } } } diff --git a/crates/matrix-sdk-crypto/src/store/mod.rs b/crates/matrix-sdk-crypto/src/store/mod.rs index 21aa405060c..67969a0e23f 100644 --- a/crates/matrix-sdk-crypto/src/store/mod.rs +++ b/crates/matrix-sdk-crypto/src/store/mod.rs @@ -1594,6 +1594,87 @@ impl Store { pub(crate) fn crypto_store(&self) -> Arc { self.inner.store.clone() } + + /// Export the keys that match the given predicate. + /// + /// # Arguments + /// + /// * `predicate` - A closure that will be called for every known + /// `InboundGroupSession`, which represents a room key. If the closure + /// returns `true` the `InboundGroupSession` will be included in the export, + /// if the closure returns `false` it will not be included. + /// + /// # Examples + /// + /// ```no_run + /// # use matrix_sdk_crypto::{OlmMachine, encrypt_room_key_export}; + /// # use ruma::{device_id, user_id, room_id}; + /// # let alice = user_id!("@alice:example.org"); + /// # async { + /// # let machine = OlmMachine::new(&alice, device_id!("DEVICEID")).await; + /// let room_id = room_id!("!test:localhost"); + /// let exported_keys = machine.store().export_room_keys(|s| s.room_id() == room_id).await.unwrap(); + /// let encrypted_export = encrypt_room_key_export(&exported_keys, "1234", 1); + /// # }; + /// ``` + pub async fn export_room_keys( + &self, + predicate: impl FnMut(&InboundGroupSession) -> bool, + ) -> Result> { + let mut exported = Vec::new(); + + let mut sessions = self.get_inbound_group_sessions().await?; + sessions.retain(predicate); + + for session in sessions { + let export = session.export().await; + exported.push(export); + } + + Ok(exported) + } + + /// Export room keys matching a predicate, providing them as an async + /// `Stream`. + /// + /// # Arguments + /// + /// * `predicate` - A closure that will be called for every known + /// `InboundGroupSession`, which represents a room key. If the closure + /// returns `true` the `InboundGroupSession` will be included in the export, + /// if the closure returns `false` it will not be included. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::pin::pin; + /// + /// use matrix_sdk_crypto::{olm::ExportedRoomKey, OlmMachine}; + /// use ruma::{device_id, room_id, user_id}; + /// use tokio_stream::StreamExt; + /// # async { + /// let alice = user_id!("@alice:example.org"); + /// let machine = OlmMachine::new(&alice, device_id!("DEVICEID")).await; + /// let room_id = room_id!("!test:localhost"); + /// let mut keys = pin!(machine + /// .store() + /// .export_room_keys_stream(|s| s.room_id() == room_id) + /// .await + /// .unwrap()); + /// while let Some(key) = keys.next().await { + /// println!("{}", key.room_id); + /// } + /// # }; + /// ``` + pub async fn export_room_keys_stream( + &self, + predicate: impl FnMut(&InboundGroupSession) -> bool, + ) -> Result> { + // TODO: if/when there is a get_inbound_group_sessions_stream, use that here. + let sessions = self.get_inbound_group_sessions().await?; + Ok(futures_util::stream::iter(sessions.into_iter().filter(predicate)) + .then(|session| async move { session.export().await })) + } } impl Deref for Store { @@ -1622,3 +1703,96 @@ impl matrix_sdk_common::store_locks::BackingStore for LockableCryptoStore { self.0.try_take_leased_lock(lease_duration_ms, key, holder).await } } + +#[cfg(test)] +mod tests { + use std::pin::pin; + + use futures_util::StreamExt; + use matrix_sdk_test::async_test; + use ruma::{room_id, user_id}; + + use crate::{machine::tests::get_machine_pair, types::EventEncryptionAlgorithm}; + + #[async_test] + async fn export_room_keys_provides_selected_keys() { + // Given an OlmMachine with room keys in it + let (alice, _, _) = get_machine_pair(user_id!("@a:s.co"), user_id!("@b:s.co"), false).await; + let room1_id = room_id!("!room1:localhost"); + let room2_id = room_id!("!room2:localhost"); + let room3_id = room_id!("!room3:localhost"); + alice.create_outbound_group_session_with_defaults_test_helper(room1_id).await.unwrap(); + alice.create_outbound_group_session_with_defaults_test_helper(room2_id).await.unwrap(); + alice.create_outbound_group_session_with_defaults_test_helper(room3_id).await.unwrap(); + + // When I export some of the keys + let keys = alice + .store() + .export_room_keys(|s| s.room_id() == room2_id || s.room_id() == room3_id) + .await + .unwrap(); + + // Then the requested keys were provided + assert_eq!(keys.len(), 2); + assert_eq!(keys[0].algorithm, EventEncryptionAlgorithm::MegolmV1AesSha2); + assert_eq!(keys[1].algorithm, EventEncryptionAlgorithm::MegolmV1AesSha2); + assert_eq!(keys[0].room_id, "!room2:localhost"); + assert_eq!(keys[1].room_id, "!room3:localhost"); + assert_eq!(keys[0].session_key.to_base64().len(), 220); + assert_eq!(keys[1].session_key.to_base64().len(), 220); + } + + #[async_test] + async fn export_room_keys_stream_can_provide_all_keys() { + // Given an OlmMachine with room keys in it + let (alice, _, _) = get_machine_pair(user_id!("@a:s.co"), user_id!("@b:s.co"), false).await; + let room1_id = room_id!("!room1:localhost"); + let room2_id = room_id!("!room2:localhost"); + alice.create_outbound_group_session_with_defaults_test_helper(room1_id).await.unwrap(); + alice.create_outbound_group_session_with_defaults_test_helper(room2_id).await.unwrap(); + + // When I export the keys as a stream + let mut keys = pin!(alice.store().export_room_keys_stream(|_| true).await.unwrap()); + + // And collect them + let mut collected = vec![]; + while let Some(key) = keys.next().await { + collected.push(key); + } + + // Then all the keys were provided + assert_eq!(collected.len(), 2); + 
assert_eq!(collected[0].algorithm, EventEncryptionAlgorithm::MegolmV1AesSha2); + assert_eq!(collected[1].algorithm, EventEncryptionAlgorithm::MegolmV1AesSha2); + assert_eq!(collected[0].room_id, "!room1:localhost"); + assert_eq!(collected[1].room_id, "!room2:localhost"); + assert_eq!(collected[0].session_key.to_base64().len(), 220); + assert_eq!(collected[1].session_key.to_base64().len(), 220); + } + + #[async_test] + async fn export_room_keys_stream_can_provide_a_subset_of_keys() { + // Given an OlmMachine with room keys in it + let (alice, _, _) = get_machine_pair(user_id!("@a:s.co"), user_id!("@b:s.co"), false).await; + let room1_id = room_id!("!room1:localhost"); + let room2_id = room_id!("!room2:localhost"); + alice.create_outbound_group_session_with_defaults_test_helper(room1_id).await.unwrap(); + alice.create_outbound_group_session_with_defaults_test_helper(room2_id).await.unwrap(); + + // When I export the keys as a stream + let mut keys = + pin!(alice.store().export_room_keys_stream(|s| s.room_id() == room1_id).await.unwrap()); + + // And collect them + let mut collected = vec![]; + while let Some(key) = keys.next().await { + collected.push(key); + } + + // Then all the keys matching our predicate were provided, and no others + assert_eq!(collected.len(), 1); + assert_eq!(collected[0].algorithm, EventEncryptionAlgorithm::MegolmV1AesSha2); + assert_eq!(collected[0].room_id, "!room1:localhost"); + assert_eq!(collected[0].session_key.to_base64().len(), 220); + } +} diff --git a/crates/matrix-sdk-crypto/src/utilities.rs b/crates/matrix-sdk-crypto/src/utilities.rs index 09d12f0f25d..103b39aa775 100644 --- a/crates/matrix-sdk-crypto/src/utilities.rs +++ b/crates/matrix-sdk-crypto/src/utilities.rs @@ -12,6 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::num::NonZeroU8; + +use ruma::MilliSecondsSinceUnixEpoch; +use time::{ + format_description::well_known::{iso8601, Iso8601}, + OffsetDateTime, +}; + #[cfg(test)] pub(crate) fn json_convert(value: &T) -> serde_json::Result where @@ -21,3 +29,57 @@ where let json = serde_json::to_string(value)?; serde_json::from_str(&json) } + +const ISO8601_WITH_MILLIS: iso8601::EncodedConfig = iso8601::Config::DEFAULT + .set_time_precision(iso8601::TimePrecision::Second { decimal_digits: NonZeroU8::new(3) }) + .encode(); + +/// Format the given timestamp into a human-readable timestamp. +/// +/// # Returns +/// +/// Provided the timestamp fits within an `OffsetDateTime` (ie, it is on or +/// before year 9999), a string that looks like `1970-01-01T00:00:00.000Z`. +/// Otherwise, `None`. +pub fn timestamp_to_iso8601(ts: MilliSecondsSinceUnixEpoch) -> Option { + let nanos_since_epoch = i128::from(ts.get()) * 1_000_000; + + // OffsetDateTime has a max year of 9999, whereas MilliSecondsSinceUnixEpoch has + // a max year of 285427, so `from_unix_timestamp_nanos` can overflow for very + // large timestamps. (The Y10K problem!) + let dt = OffsetDateTime::from_unix_timestamp_nanos(nanos_since_epoch).ok()?; + + // SAFETY: `format` can fail if: + // * The input lacks information on a component we have asked it to format + // (eg, it is given a `Time` and we ask it for a date), or + // * The input contains an invalid component (eg 30th February), or + // * An `io::Error` is raised internally. + // + // The first two cannot occur because we know we are giving it a valid + // OffsetDateTime that has all the components we are asking it to print. 
+ // + // The third should not occur because we are formatting a short string to an + // in-memory buffer. + + Some(dt.format(&Iso8601::<ISO8601_WITH_MILLIS>).unwrap()) + } + + #[cfg(test)] + pub(crate) mod tests { + use ruma::{MilliSecondsSinceUnixEpoch, UInt}; + + use super::timestamp_to_iso8601; + + #[test] + fn test_timestamp_to_iso8601() { + assert_eq!( + timestamp_to_iso8601(MilliSecondsSinceUnixEpoch(UInt::new_saturating(0))), + Some("1970-01-01T00:00:00.000Z".to_owned()) + ); + assert_eq!( + timestamp_to_iso8601(MilliSecondsSinceUnixEpoch(UInt::new_saturating(1709657033012))), + Some("2024-03-05T16:43:53.012Z".to_owned()) + ); + assert_eq!(timestamp_to_iso8601(MilliSecondsSinceUnixEpoch(UInt::MAX)), None); + } +} diff --git a/crates/matrix-sdk-crypto/src/verification/mod.rs b/crates/matrix-sdk-crypto/src/verification/mod.rs index 72fe1a31bd0..8da2192c47c 100644 --- a/crates/matrix-sdk-crypto/src/verification/mod.rs +++ b/crates/matrix-sdk-crypto/src/verification/mod.rs @@ -364,6 +364,8 @@ impl Cancelled { } CancelCode::InvalidMessage => "The received message was invalid.", CancelCode::KeyMismatch => "The expected key did not match the verified one", + CancelCode::MismatchedCommitment => "The hash commitment did not match.", + CancelCode::MismatchedSas => "The SAS did not match.", CancelCode::Timeout => "The verification process timed out.", CancelCode::UnexpectedMessage => "The device received an unexpected message.", CancelCode::UnknownMethod => { diff --git a/crates/matrix-sdk-sqlite/src/state_store.rs b/crates/matrix-sdk-sqlite/src/state_store.rs index 5ad856a0858..211317c3bbc 100644 --- a/crates/matrix-sdk-sqlite/src/state_store.rs +++ b/crates/matrix-sdk-sqlite/src/state_store.rs @@ -1517,6 +1517,13 @@ impl StateStore for SqliteStateStore { self.acquire().await?.get_kv_blob(self.encode_custom_key(key)).await } + async fn set_custom_value_no_read(&self, key: &[u8], value: Vec<u8>) -> Result<()> { + let conn = self.acquire().await?; + let key = self.encode_custom_key(key); + conn.set_kv_blob(key, value).await?; + Ok(()) + } + async fn set_custom_value(&self, key: &[u8], value: Vec<u8>) -> Result<Option<Vec<u8>>> { let conn = self.acquire().await?; let key = self.encode_custom_key(key); diff --git a/crates/matrix-sdk-ui/Cargo.toml b/crates/matrix-sdk-ui/Cargo.toml index f18b6edb51c..d86bff99b13 100644 --- a/crates/matrix-sdk-ui/Cargo.toml +++ b/crates/matrix-sdk-ui/Cargo.toml @@ -12,6 +12,9 @@ default = ["e2e-encryption", "native-tls"] e2e-encryption = ["matrix-sdk/e2e-encryption"] +# This feature will unify the `invites` list with the `all_rooms` list. +experimental-room-list-with-unified-invites = [] + native-tls = ["matrix-sdk/native-tls"] rustls-tls = ["matrix-sdk/rustls-tls"] @@ -31,7 +34,7 @@ eyeball-im-util = { workspace = true } futures-core = { workspace = true } futures-util = { workspace = true } fuzzy-matcher = "0.3.7" -imbl = { version = "2.0.0", features = ["serde"] } +imbl = { workspace = true, features = ["serde"] } indexmap = "2.0.0" itertools = { workspace = true } matrix-sdk = { workspace = true, features = ["experimental-oidc", "experimental-sliding-sync"] } diff --git a/crates/matrix-sdk-ui/src/encryption_sync_service.rs b/crates/matrix-sdk-ui/src/encryption_sync_service.rs index 56e2051d69f..c4ca039f04e 100644 --- a/crates/matrix-sdk-ui/src/encryption_sync_service.rs +++ b/crates/matrix-sdk-ui/src/encryption_sync_service.rs @@ -26,7 +26,7 @@ //! //! 
[NSE]: https://developer.apple.com/documentation/usernotifications/unnotificationserviceextension -use std::time::Duration; +use std::{pin::Pin, time::Duration}; use async_stream::stream; use futures_core::stream::Stream; @@ -34,7 +34,7 @@ use futures_util::{pin_mut, StreamExt}; use matrix_sdk::{Client, SlidingSync, LEASE_DURATION_MS}; use ruma::{api::client::sync::sync_events::v4, assign}; use tokio::sync::OwnedMutexGuard; -use tracing::{debug, trace}; +use tracing::{debug, instrument, trace, Span}; /// Unit type representing a permit to *use* an [`EncryptionSyncService`]. /// @@ -143,6 +143,7 @@ impl EncryptionSyncService { /// Note: the [`EncryptionSyncPermit`] parameter ensures that there's at /// most one encryption sync running at any time. See its documentation /// for more details. + #[instrument(skip_all, fields(store_generation))] pub async fn run_fixed_iterations( self, num_iterations: u8, @@ -152,7 +153,7 @@ impl EncryptionSyncService { pin_mut!(sync); - let _lock_guard = if self.with_locking { + let lock_guard = if self.with_locking { let mut lock_guard = self.client.encryption().try_lock_store_once().await.map_err(Error::LockError)?; @@ -192,6 +193,8 @@ impl EncryptionSyncService { None }; + Span::current().record("store_generation", lock_guard.map(|guard| guard.generation())); + for _ in 0..num_iterations { match sync.next().await { Some(Ok(update_summary)) => { @@ -241,17 +244,7 @@ impl EncryptionSyncService { pin_mut!(sync); loop { - let guard = if self.with_locking { - self.client - .encryption() - .spin_lock_store(Some(60000)) - .await - .map_err(Error::LockError)? - } else { - None - }; - - match sync.next().await { + match self.next_sync_with_lock(&mut sync).await? { Some(Ok(update_summary)) => { // This API is only concerned with the e2ee and to-device extensions. // Warn if anything weird has been received from the proxy. @@ -264,18 +257,12 @@ impl EncryptionSyncService { // Cool cool, let's do it again. trace!("Encryption sync received an update!"); - - drop(guard); - yield Ok(()); continue; } Some(Err(err)) => { trace!("Encryption sync stopped because of an error: {err:#}"); - - drop(guard); - yield Err(Error::SlidingSync(err)); break; } @@ -289,6 +276,24 @@ impl EncryptionSyncService { }) } + /// Helper function for `sync`. Take the cross-process store lock, and call + /// `sync.next()` + #[instrument(skip_all, fields(store_generation))] + async fn next_sync_with_lock( + &self, + sync: &mut Pin<&mut impl Stream>, + ) -> Result, Error> { + let guard = if self.with_locking { + self.client.encryption().spin_lock_store(Some(60000)).await.map_err(Error::LockError)? + } else { + None + }; + + Span::current().record("store_generation", guard.map(|guard| guard.generation())); + + Ok(sync.next().await) + } + /// Requests that the underlying sliding sync be stopped. /// /// This will unlock the cross-process lock, if taken. diff --git a/crates/matrix-sdk-ui/src/event_cache.rs b/crates/matrix-sdk-ui/src/event_cache.rs deleted file mode 100644 index 916a8e88ae6..00000000000 --- a/crates/matrix-sdk-ui/src/event_cache.rs +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2024 The Matrix.org Foundation C.I.C. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The event cache is an abstraction layer, sitting between the Rust SDK and a -//! final client, that acts as a global observer of all the rooms, gathering and -//! inferring some extra useful information about each room. In particular, this -//! doesn't require subscribing to a specific room to get access to this -//! information. -//! -//! It's intended to be fast, robust and easy to maintain. -//! -//! See the [github issue](https://github.com/matrix-org/matrix-rust-sdk/issues/3058) for more details about the historical reasons that led us to start writing this. -//! -//! Most of it is still a work-in-progress, as of 2024-01-22. -//! -//! The desired set of features it may eventually implement is the following: -//! -//! - [ ] compute proper unread room counts, and use backpagination to get -//! missing messages/notifications/mentions, if needs be. -//! - [ ] expose that information with a new data structure similar to the -//! `RoomInfo`, and that may update a `RoomListService`. -//! - [ ] provide read receipts for each message. -//! - [ ] backwards and forward pagination, and reconcile results with cached -//! timelines. -//! - [ ] retry decryption upon receiving new keys (from an encryption sync -//! service or from a key backup). -//! - [ ] expose the latest event for a given room. -//! - [ ] caching of events on-disk. - -#![forbid(missing_docs)] - -use std::{collections::BTreeMap, fmt::Debug, sync::Arc}; - -use matrix_sdk::{Client, Room}; -use matrix_sdk_base::{ - deserialized_responses::{AmbiguityChange, SyncTimelineEvent}, - sync::{JoinedRoomUpdate, LeftRoomUpdate, Timeline}, -}; -use ruma::{ - events::{AnyRoomAccountDataEvent, AnySyncEphemeralRoomEvent}, - serde::Raw, - OwnedEventId, OwnedRoomId, RoomId, -}; -use tokio::{ - spawn, - sync::{ - broadcast::{error::RecvError, Receiver, Sender}, - RwLock, - }, - task::JoinHandle, -}; -use tracing::{error, trace}; - -use self::store::{EventCacheStore, MemoryStore}; - -mod store; - -/// An error observed in the [`EventCache`]. -#[derive(thiserror::Error, Debug)] -pub enum EventCacheError { - /// A room hasn't been found, when trying to create a view for that room. - #[error("Room with id {0} not found")] - RoomNotFound(OwnedRoomId), -} - -/// A result using the [`EventCacheError`]. -pub type Result = std::result::Result; - -/// Hold handles to the tasks spawn by a [`RoomEventCache`]. -pub struct EventCacheDropHandles { - listen_updates_task: JoinHandle<()>, -} - -impl Debug for EventCacheDropHandles { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("EventCacheDropHandles").finish_non_exhaustive() - } -} - -impl Drop for EventCacheDropHandles { - fn drop(&mut self) { - self.listen_updates_task.abort(); - } -} - -/// An event cache, providing lots of useful functionality for clients. -/// -/// See also the module-level comment. 
-pub struct EventCache { - inner: Arc>, - - drop_handles: Arc, -} - -impl Debug for EventCache { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("EventCache").finish_non_exhaustive() - } -} - -impl EventCache { - /// Create a new [`EventCache`] for the given client. - pub fn new(client: Client) -> Self { - let mut room_updates_feed = client.subscribe_to_all_room_updates(); - - let store = Arc::new(MemoryStore::new()); - let inner = - Arc::new(RwLock::new(EventCacheInner { client, by_room: Default::default(), store })); - - // Spawn the task that will listen to all the room updates at once. - trace!("Spawning the listen task"); - let listen_updates_task = spawn({ - let inner = inner.clone(); - - async move { - loop { - match room_updates_feed.recv().await { - Ok(updates) => { - // We received some room updates. Handle them. - - // Left rooms. - for (room_id, left_room_update) in updates.leave { - let room = match inner.write().await.for_room(&room_id).await { - Ok(room) => room, - Err(err) => { - error!("can't get left room {room_id}: {err}"); - continue; - } - }; - - if let Err(err) = - room.inner.handle_left_room_update(left_room_update).await - { - error!("handling left room update: {err}"); - } - } - - // Joined rooms. - for (room_id, joined_room_update) in updates.join { - let room = match inner.write().await.for_room(&room_id).await { - Ok(room) => room, - Err(err) => { - error!("can't get joined room {room_id}: {err}"); - continue; - } - }; - - if let Err(err) = - room.inner.handle_joined_room_update(joined_room_update).await - { - error!("handling joined room update: {err}"); - } - } - - // Invited rooms. - // TODO: we don't anything with `updates.invite` at - // this point. - } - - Err(RecvError::Lagged(_)) => { - // Forget everything we know; we could have missed events, and we have - // no way to reconcile at the moment! - // TODO: implement Smart Matching™, - let mut inner = inner.write().await; - for room_id in inner.by_room.keys() { - if let Err(err) = inner.store.clear_room_events(room_id).await { - error!("unable to clear room after room updates lag: {err}"); - } - } - inner.by_room.clear(); - } - - Err(RecvError::Closed) => { - // The sender has shut down, exit. - break; - } - } - } - } - }); - - Self { inner, drop_handles: Arc::new(EventCacheDropHandles { listen_updates_task }) } - } - - /// Return a room-specific view over the [`EventCache`]. - /// - /// It may not be found, if the room isn't known to the client. - pub async fn for_room( - &self, - room_id: &RoomId, - ) -> Result<(RoomEventCache, Arc)> { - let room = self.inner.write().await.for_room(room_id).await?; - - Ok((room, self.drop_handles.clone())) - } - - /// Add an initial set of events to the event cache, reloaded from a cache. - /// - /// TODO: temporary for API compat, as the event cache should take care of - /// its own store. - pub async fn add_initial_events( - &mut self, - room_id: &RoomId, - events: Vec, - ) -> Result<()> { - let room_cache = self.inner.write().await.for_room(room_id).await?; - room_cache.inner.append_events(events).await?; - Ok(()) - } -} - -struct EventCacheInner { - /// Reference to the client used to navigate this cache. - client: Client, - - /// Lazily-filled cache of live [`RoomEventCache`], once per room. - by_room: BTreeMap, - - /// Backend used for storage. - store: Arc, -} - -impl EventCacheInner { - /// Return a room-specific view over the [`EventCache`]. - /// - /// It may not be found, if the room isn't known to the client. 
- async fn for_room(&mut self, room_id: &RoomId) -> Result { - match self.by_room.get(room_id) { - Some(room) => Ok(room.clone()), - None => { - let room = self - .client - .get_room(room_id) - .ok_or_else(|| EventCacheError::RoomNotFound(room_id.to_owned()))?; - let room_event_cache = RoomEventCache::new(room, self.store.clone()); - - self.by_room.insert(room_id.to_owned(), room_event_cache.clone()); - - Ok(room_event_cache) - } - } - } -} - -/// A subset of an event cache, for a room. -/// -/// Cloning is shallow, and thus is cheap to do. -#[derive(Clone)] -pub struct RoomEventCache { - inner: Arc, -} - -impl Debug for RoomEventCache { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("RoomEventCache").finish_non_exhaustive() - } -} - -impl RoomEventCache { - /// Create a new [`RoomEventCache`] using the given room and store. - fn new(room: Room, store: Arc) -> Self { - Self { inner: Arc::new(RoomEventCacheInner::new(room, store)) } - } - - /// Subscribe to room updates for this room, after getting the initial list - /// of events. XXX: Could/should it use some kind of `Observable` - /// instead? Or not something async, like explicit handlers as our event - /// handlers? - pub async fn subscribe( - &self, - ) -> Result<(Vec, Receiver)> { - Ok(( - self.inner.store.room_events(self.inner.room.room_id()).await?, - self.inner.sender.subscribe(), - )) - } -} - -struct RoomEventCacheInner { - sender: Sender, - store: Arc, - room: Room, -} - -impl RoomEventCacheInner { - /// Creates a new cache for a room, and subscribes to room updates, so as - /// to handle new timeline events. - fn new(room: Room, store: Arc) -> Self { - let sender = Sender::new(32); - Self { room, store, sender } - } - - async fn handle_joined_room_update(&self, updates: JoinedRoomUpdate) -> Result<()> { - self.handle_timeline( - updates.timeline, - updates.ephemeral.clone(), - updates.account_data, - updates.ambiguity_changes, - ) - .await?; - Ok(()) - } - - async fn handle_timeline( - &self, - timeline: Timeline, - ephemeral: Vec>, - account_data: Vec>, - ambiguity_changes: BTreeMap, - ) -> Result<()> { - let room_id = self.room.room_id(); - - if timeline.limited { - // Ideally we'd try to reconcile existing events against those received in the - // timeline, but we're not there yet. In the meanwhile, clear the - // items from the room. TODO: implement Smart Matching™. - trace!("limited timeline, clearing all previous events"); - self.store.clear_room_events(room_id).await?; - let _ = self.sender.send(RoomEventCacheUpdate::Clear); - } - - // Add all the events to the backend. - trace!("adding new events"); - self.store.add_room_events(room_id, timeline.events.clone()).await?; - - // Propagate events to observers. - let _ = self.sender.send(RoomEventCacheUpdate::Append { - events: timeline.events, - prev_batch: timeline.prev_batch, - ephemeral, - account_data, - ambiguity_changes, - }); - - Ok(()) - } - - async fn handle_left_room_update(&self, updates: LeftRoomUpdate) -> Result<()> { - self.handle_timeline(updates.timeline, Vec::new(), Vec::new(), updates.ambiguity_changes) - .await?; - Ok(()) - } - - /// Append a set of events to the room cache and storage, notifying - /// observers. 
- async fn append_events(&self, events: Vec) -> Result<()> { - self.store.add_room_events(self.room.room_id(), events.clone()).await?; - - let _ = self.sender.send(RoomEventCacheUpdate::Append { - events, - prev_batch: None, - account_data: Default::default(), - ephemeral: Default::default(), - ambiguity_changes: Default::default(), - }); - - Ok(()) - } -} - -/// An update related to events happened in a room. -#[derive(Clone)] -pub enum RoomEventCacheUpdate { - /// The room has been cleared from events. - Clear, - /// The room has new events. - Append { - /// All the new events that have been added to the room. - events: Vec, - /// XXX: this is temporary, until backpagination lives in the event - /// cache. - prev_batch: Option, - /// XXX: this is temporary, until account data lives in the event cache - /// — or will it live there? - account_data: Vec>, - /// XXX: this is temporary, until read receipts are handled in the event - /// cache - ephemeral: Vec>, - /// Collection of ambiguity changes that room member events trigger. - /// - /// This is a map of event ID of the `m.room.member` event to the - /// details of the ambiguity change. - ambiguity_changes: BTreeMap, - }, -} diff --git a/crates/matrix-sdk-ui/src/event_cache/store.rs b/crates/matrix-sdk-ui/src/event_cache/store.rs deleted file mode 100644 index 984958fcdd4..00000000000 --- a/crates/matrix-sdk-ui/src/event_cache/store.rs +++ /dev/null @@ -1,56 +0,0 @@ -use std::collections::BTreeMap; - -use async_trait::async_trait; -use matrix_sdk_base::deserialized_responses::SyncTimelineEvent; -use ruma::{OwnedRoomId, RoomId}; -use tokio::sync::RwLock; - -use super::Result; - -/// A store that can be remember information about the event cache. -/// -/// It really acts as a cache, in the sense that clearing the backing data -/// should not have any irremediable effect, other than providing a lesser user -/// experience. -#[async_trait] -pub trait EventCacheStore: Send + Sync { - /// Returns all the known events for the given room. - async fn room_events(&self, room: &RoomId) -> Result>; - - /// Adds all the events to the given room. - async fn add_room_events(&self, room: &RoomId, events: Vec) -> Result<()>; - - /// Clear all the events from the given room. - async fn clear_room_events(&self, room: &RoomId) -> Result<()>; -} - -/// An [`EventCacheStore`] implementation that keeps all the information in -/// memory. -pub(crate) struct MemoryStore { - /// All the events per room, in sync order. - by_room: RwLock>>, -} - -impl MemoryStore { - /// Create a new empty [`MemoryStore`]. 
- pub fn new() -> Self { - Self { by_room: Default::default() } - } -} - -#[async_trait] -impl EventCacheStore for MemoryStore { - async fn room_events(&self, room: &RoomId) -> Result> { - Ok(self.by_room.read().await.get(room).cloned().unwrap_or_default()) - } - - async fn add_room_events(&self, room: &RoomId, events: Vec) -> Result<()> { - self.by_room.write().await.entry(room.to_owned()).or_default().extend(events); - Ok(()) - } - - async fn clear_room_events(&self, room: &RoomId) -> Result<()> { - let _ = self.by_room.write().await.remove(room); - Ok(()) - } -} diff --git a/crates/matrix-sdk-ui/src/lib.rs b/crates/matrix-sdk-ui/src/lib.rs index 863553650b0..3537a39e281 100644 --- a/crates/matrix-sdk-ui/src/lib.rs +++ b/crates/matrix-sdk-ui/src/lib.rs @@ -17,11 +17,11 @@ use ruma::html::HtmlSanitizerMode; mod events; pub mod encryption_sync_service; -pub mod event_cache; pub mod notification_client; pub mod room_list_service; pub mod sync_service; pub mod timeline; +pub mod unable_to_decrypt_hook; pub use self::{room_list_service::RoomListService, timeline::Timeline}; diff --git a/crates/matrix-sdk-ui/src/notification_client.rs b/crates/matrix-sdk-ui/src/notification_client.rs index d0efb560874..86308c9adbc 100644 --- a/crates/matrix-sdk-ui/src/notification_client.rs +++ b/crates/matrix-sdk-ui/src/notification_client.rs @@ -452,7 +452,7 @@ impl NotificationClient { } Ok(NotificationStatus::Event( - NotificationItem::new(&room, &raw_event, push_actions.as_deref(), Vec::new()).await?, + NotificationItem::new(&room, raw_event, push_actions.as_deref(), Vec::new()).await?, )) } @@ -501,7 +501,7 @@ impl NotificationClient { Ok(Some( NotificationItem::new( &room, - &RawNotificationEvent::Timeline(timeline_event.event.cast()), + RawNotificationEvent::Timeline(timeline_event.event.cast()), timeline_event.push_actions.as_deref(), state_events, ) @@ -573,14 +573,26 @@ impl NotificationClientBuilder { } } -enum RawNotificationEvent { +/// The notification event, as fetched from the server for the given +/// `event_id`, kept in its raw (but decrypted) form. At this point we only +/// know whether it is an invite or a regular timeline event; it has not +/// been deserialized further. +#[derive(Debug)] +pub enum RawNotificationEvent { + /// The raw event for a timeline event Timeline(Raw<AnySyncTimelineEvent>), + /// The notification contains an invitation, given as a raw + /// `StrippedRoomMemberEvent` Invite(Raw<StrippedRoomMemberEvent>), } +/// The deserialized event, as fetched from the server for the given +/// `event_id`, after decryption (if possible). #[derive(Debug)] pub enum NotificationEvent { + /// The notification was for a timeline event Timeline(AnySyncTimelineEvent), + /// The notification is an invite, with the given stripped room member event data Invite(StrippedRoomMemberEvent), } @@ -599,6 +611,9 @@ pub struct NotificationItem { /// Underlying Ruma event. pub event: NotificationEvent, + /// The raw form of the underlying event. + pub raw_event: RawNotificationEvent, + /// Display name of the sender. pub sender_display_name: Option, /// Avatar URL of the sender. 
@@ -630,11 +645,11 @@ pub struct NotificationItem { impl NotificationItem { async fn new( room: &Room, - raw_event: &RawNotificationEvent, + raw_event: RawNotificationEvent, push_actions: Option<&[Action]>, state_events: Vec>, ) -> Result { - let event = match raw_event { + let event = match &raw_event { RawNotificationEvent::Timeline(raw_event) => { let mut event = raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?; if let AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::RoomMessage( @@ -694,6 +709,7 @@ impl NotificationItem { let item = NotificationItem { event, + raw_event, sender_display_name, sender_avatar_url, is_sender_name_ambiguous, diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/all.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/all.rs index 8470e6c982b..a0e4e03ff9e 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/all.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/all.rs @@ -1,3 +1,17 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use super::{super::room_list::BoxedFilterFn, Filter}; /// Create a new filter that will run multiple filters. It returns `false` if at diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/any.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/any.rs index dd92ddf73f7..a0751901d36 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/any.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/any.rs @@ -1,3 +1,17 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use super::{super::room_list::BoxedFilterFn, Filter}; /// Create a new filter that will run multiple filters. It returns `true` if at diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/category.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/category.rs index f7041ca14a3..cc73ad51a3c 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/category.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/category.rs @@ -1,3 +1,17 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use matrix_sdk::{Client, RoomListEntry}; use super::Filter; diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/favourite.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/favourite.rs new file mode 100644 index 00000000000..257f686a558 --- /dev/null +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/favourite.rs @@ -0,0 +1,96 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use matrix_sdk::{Client, RoomListEntry}; + +use super::Filter; + +struct FavouriteRoomMatcher +where + F: Fn(&RoomListEntry) -> Option, +{ + is_favourite: F, +} + +impl FavouriteRoomMatcher +where + F: Fn(&RoomListEntry) -> Option, +{ + fn matches(&self, room_list_entry: &RoomListEntry) -> bool { + if !matches!(room_list_entry, RoomListEntry::Filled(_) | RoomListEntry::Invalidated(_)) { + return false; + } + + (self.is_favourite)(room_list_entry).unwrap_or(false) + } +} + +/// Create a new filter that will accept all filled or invalidated entries, but +/// filters out rooms that are not marked as favourite (see +/// [`matrix_sdk_base::Room::is_favourite`]). 
+pub fn new_filter(client: &Client) -> impl Filter { + let client = client.clone(); + + let matcher = FavouriteRoomMatcher { + is_favourite: move |room| { + let room_id = room.as_room_id()?; + let room = client.get_room(room_id)?; + + Some(room.is_favourite()) + }, + }; + + move |room_list_entry| -> bool { matcher.matches(room_list_entry) } +} + +#[cfg(test)] +mod tests { + use std::ops::Not; + + use matrix_sdk::RoomListEntry; + use ruma::room_id; + + use super::FavouriteRoomMatcher; + + #[test] + fn test_is_favourite() { + let matcher = FavouriteRoomMatcher { is_favourite: |_| Some(true) }; + + assert!(matcher.matches(&RoomListEntry::Empty).not()); + assert!(matcher.matches(&RoomListEntry::Filled(room_id!("!r0:bar.org").to_owned()))); + assert!(matcher.matches(&RoomListEntry::Invalidated(room_id!("!r0:bar.org").to_owned()))); + } + + #[test] + fn test_is_not_favourite() { + let matcher = FavouriteRoomMatcher { is_favourite: |_| Some(false) }; + + assert!(matcher.matches(&RoomListEntry::Empty).not()); + assert!(matcher.matches(&RoomListEntry::Filled(room_id!("!r0:bar.org").to_owned())).not()); + assert!(matcher + .matches(&RoomListEntry::Invalidated(room_id!("!r0:bar.org").to_owned())) + .not()); + } + + #[test] + fn test_favourite_state_cannot_be_found() { + let matcher = FavouriteRoomMatcher { is_favourite: |_| None }; + + assert!(matcher.matches(&RoomListEntry::Empty).not()); + assert!(matcher.matches(&RoomListEntry::Filled(room_id!("!r0:bar.org").to_owned())).not()); + assert!(matcher + .matches(&RoomListEntry::Invalidated(room_id!("!r0:bar.org").to_owned())) + .not()); + } +} diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/fuzzy_match_room_name.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/fuzzy_match_room_name.rs index 9e8e672fd04..4b476035fc6 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/fuzzy_match_room_name.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/fuzzy_match_room_name.rs @@ -1,3 +1,17 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ pub use fuzzy_matcher::{skim::SkimMatcherV2, FuzzyMatcher as _}; use matrix_sdk::Client; diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/invite.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/invite.rs new file mode 100644 index 00000000000..661981524e5 --- /dev/null +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/invite.rs @@ -0,0 +1,81 @@ +use matrix_sdk::{Client, RoomListEntry}; +use matrix_sdk_base::RoomState; + +use super::Filter; + +struct InviteRoomMatcher +where + F: Fn(&RoomListEntry) -> Option, +{ + state: F, +} + +impl InviteRoomMatcher +where + F: Fn(&RoomListEntry) -> Option, +{ + fn matches(&self, room: &RoomListEntry) -> bool { + if !matches!(room, RoomListEntry::Filled(_) | RoomListEntry::Invalidated(_)) { + return false; + } + + if let Some(state) = (self.state)(room) { + state == RoomState::Invited + } else { + false + } + } +} + +/// Create a new filter that will accept all filled or invalidated entries, but +/// filters out rooms that are not invites (see +/// [`matrix_sdk_base::RoomState::Invited`]). +pub fn new_filter(client: &Client) -> impl Filter { + let client = client.clone(); + + let matcher = InviteRoomMatcher { + state: move |room| { + let room_id = room.as_room_id()?; + let room = client.get_room(room_id)?; + Some(room.state()) + }, + }; + + move |room_list_entry| -> bool { matcher.matches(room_list_entry) } +} + +#[cfg(test)] +mod tests { + use matrix_sdk::RoomListEntry; + use matrix_sdk_base::RoomState; + use ruma::room_id; + + use super::InviteRoomMatcher; + + #[test] + fn test_all_invite_kind_of_room_list_entry() { + // When we can't figure out the room state, nothing matches. + let matcher = InviteRoomMatcher { state: |_| None }; + assert!(!matcher.matches(&RoomListEntry::Empty)); + assert!(!matcher.matches(&RoomListEntry::Filled(room_id!("!r0:bar.org").to_owned()))); + assert!(!matcher.matches(&RoomListEntry::Invalidated(room_id!("!r0:bar.org").to_owned()))); + + // When a room has been left, it doesn't match. + let matcher = InviteRoomMatcher { state: |_| Some(RoomState::Left) }; + assert!(!matcher.matches(&RoomListEntry::Empty)); + assert!(!matcher.matches(&RoomListEntry::Filled(room_id!("!r0:bar.org").to_owned()))); + assert!(!matcher.matches(&RoomListEntry::Invalidated(room_id!("!r0:bar.org").to_owned()))); + + // When a room has been joined, it doesn't match. + let matcher = InviteRoomMatcher { state: |_| Some(RoomState::Joined) }; + assert!(!matcher.matches(&RoomListEntry::Empty)); + assert!(!matcher.matches(&RoomListEntry::Filled(room_id!("!r0:bar.org").to_owned()))); + assert!(!matcher.matches(&RoomListEntry::Invalidated(room_id!("!r0:bar.org").to_owned()))); + + // When a room has been joined, it does match (unless it's empty). + let matcher = InviteRoomMatcher { state: |_| Some(RoomState::Invited) }; + assert!(!matcher.matches(&RoomListEntry::Empty)); + assert!(matcher.matches(&RoomListEntry::Filled(room_id!("!r0:bar.org").to_owned()))); + assert!(matcher.matches(&RoomListEntry::Invalidated(room_id!("!r0:bar.org").to_owned()))); + } +} diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/mod.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/mod.rs index 794a2ddff41..fdc66793465 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/mod.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/mod.rs @@ -1,7 +1,65 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A collection of room filters. +//! +//! The room list can provide an access to the rooms per list, like with +//! [`super::RoomList::entries_with_dynamic_adapters`]. The provided collection +//! of rooms can be filtered with these filters. A classical usage would be the +//! following: +//! +//! ```rust +//! use matrix_sdk::Client; +//! use matrix_sdk_ui::room_list_service::{ +//! filters, RoomListDynamicEntriesController, +//! }; +//! +//! fn configure_room_list( +//! client: &Client, +//! entries_controller: &RoomListDynamicEntriesController, +//! ) { +//! // _All_ non-left rooms +//! // _and_ that fall in the “People” category, +//! // _and_ that are marked as favourite, +//! // _and_ that are _not_ unread. +//! entries_controller.set_filter(Box::new( +//! // All +//! filters::new_filter_all(vec![ +//! // Non-left +//! Box::new(filters::new_filter_non_left(&client)), +//! // People +//! Box::new(filters::new_filter_category( +//! client, +//! filters::RoomCategory::People, +//! )), +//! // Favourite +//! Box::new(filters::new_filter_favourite(client)), +//! // Not Unread +//! Box::new(filters::new_filter_not(Box::new( +//! filters::new_filter_unread(client), +//! ))), +//! ]), +//! )); +//! } +//! ``` + mod all; mod any; mod category; +mod favourite; mod fuzzy_match_room_name; +mod invite; mod non_left; mod none; mod normalized_match_room_name; @@ -11,7 +69,9 @@ mod unread; pub use all::new_filter as new_filter_all; pub use any::new_filter as new_filter_any; pub use category::{new_filter as new_filter_category, RoomCategory}; +pub use favourite::new_filter as new_filter_favourite; pub use fuzzy_match_room_name::new_filter as new_filter_fuzzy_match_room_name; +pub use invite::new_filter as new_filter_invite; use matrix_sdk::RoomListEntry; pub use non_left::new_filter as new_filter_non_left; pub use none::new_filter as new_filter_none; diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/non_left.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/non_left.rs index b31750772cb..c2e8c7fbba9 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/non_left.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/non_left.rs @@ -1,3 +1,17 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
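The newly exported `new_filter_invite` composes the same way. A hedged sketch of an invites-only view, mirroring the module example above; the helper name is illustrative.

```rust
use matrix_sdk::Client;
use matrix_sdk_ui::room_list_service::{filters, RoomListDynamicEntriesController};

/// Restrict the dynamic room list to pending invites.
fn show_only_invites(client: &Client, controller: &RoomListDynamicEntriesController) {
    controller.set_filter(Box::new(filters::new_filter_invite(client)));
}
```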
+ use matrix_sdk::{Client, RoomListEntry}; use matrix_sdk_base::RoomState; diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/none.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/none.rs index 99e97be9c24..d94645c62f7 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/none.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/none.rs @@ -1,3 +1,17 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use super::Filter; /// Create a new filter that will reject all entries. diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/normalized_match_room_name.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/normalized_match_room_name.rs index c0a49602de6..184404bd78e 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/normalized_match_room_name.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/normalized_match_room_name.rs @@ -1,3 +1,17 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use matrix_sdk::Client; use super::{normalize_string, Filter}; diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/not.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/not.rs index b0a63d73689..4e7713147ca 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/not.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/not.rs @@ -1,3 +1,17 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
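Since the `not` combinator is touched just below, a short hedged sketch of negation: hiding favourites by wrapping the favourite filter in `new_filter_not`, using the same `Box`-based composition as the module example; the helper name is illustrative.

```rust
use matrix_sdk::Client;
use matrix_sdk_ui::room_list_service::{filters, RoomListDynamicEntriesController};

/// Hide favourite rooms: accept everything the favourite filter rejects.
fn hide_favourites(client: &Client, controller: &RoomListDynamicEntriesController) {
    controller.set_filter(Box::new(filters::new_filter_not(Box::new(
        filters::new_filter_favourite(client),
    ))));
}
```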
+ use std::ops::Not; use super::{super::room_list::BoxedFilterFn, Filter}; diff --git a/crates/matrix-sdk-ui/src/room_list_service/filters/unread.rs b/crates/matrix-sdk-ui/src/room_list_service/filters/unread.rs index d78911bc899..6a5c3d23d88 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/filters/unread.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/filters/unread.rs @@ -1,3 +1,17 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use matrix_sdk::{Client, RoomListEntry}; use matrix_sdk_base::read_receipts::RoomReadReceipts; diff --git a/crates/matrix-sdk-ui/src/room_list_service/mod.rs b/crates/matrix-sdk-ui/src/room_list_service/mod.rs index f98ee88cdd7..5d03cc6505b 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/mod.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/mod.rs @@ -73,8 +73,8 @@ use eyeball::{SharedObservable, Subscriber}; use futures_util::{pin_mut, Stream, StreamExt}; pub use matrix_sdk::RoomListEntry; use matrix_sdk::{ - sliding_sync::Ranges, Client, Error as SlidingSyncError, SlidingSync, SlidingSyncList, - SlidingSyncListBuilder, SlidingSyncMode, + event_cache::EventCacheError, sliding_sync::Ranges, Client, Error as SlidingSyncError, + SlidingSync, SlidingSyncList, SlidingSyncListBuilder, SlidingSyncMode, }; use matrix_sdk_base::ring_buffer::RingBuffer; pub use room::*; @@ -95,8 +95,6 @@ use tokio::{ time::timeout, }; -use crate::event_cache::EventCacheError; - /// The [`RoomListService`] type. See the module's documentation to learn more. #[derive(Debug)] pub struct RoomListService { @@ -138,7 +136,24 @@ impl RoomListService { /// This won't start an encryption sync, and it's the user's responsibility /// to create one in this case using `EncryptionSync`. pub async fn new(client: Client) -> Result { - Self::new_internal(client, false).await + Self::new_internal( + client, + false, + #[cfg(feature = "experimental-room-list-with-unified-invites")] + false, + ) + .await + } + + /// Create a new `RoomList` that disables encryption, and enables the + /// unified invites (i.e. invites are part of the `all_rooms` list; side + /// note: the `invites` list is still present). + #[cfg(feature = "experimental-room-list-with-unified-invites")] + pub async fn new_with_unified_invites( + client: Client, + with_unified_invites: bool, + ) -> Result { + Self::new_internal(client, false, with_unified_invites).await } /// Create a new `RoomList` that enables encryption. @@ -146,10 +161,20 @@ impl RoomListService { /// This will include syncing the encryption information, so there must not /// be any instance of `EncryptionSync` running in the background. 
pub async fn new_with_encryption(client: Client) -> Result { - Self::new_internal(client, true).await + Self::new_internal( + client, + true, + #[cfg(feature = "experimental-room-list-with-unified-invites")] + false, + ) + .await } - async fn new_internal(client: Client, with_encryption: bool) -> Result { + async fn new_internal( + client: Client, + with_encryption: bool, + #[cfg(feature = "experimental-room-list-with-unified-invites")] with_unified_invites: bool, + ) -> Result { let mut builder = client .sliding_sync("room-list") .map_err(Error::SlidingSync)? @@ -187,6 +212,8 @@ impl RoomListService { (StateEventType::RoomMember, "$ME".to_owned()), (StateEventType::RoomPowerLevels, "".to_owned()), ]), + #[cfg(feature = "experimental-room-list-with-unified-invites")] + with_unified_invites, )) .await .map_err(Error::SlidingSync)? @@ -214,6 +241,9 @@ impl RoomListService { .map(Arc::new) .map_err(Error::SlidingSync)?; + // Eagerly subscribe the event cache to sync responses. + client.event_cache().subscribe()?; + Ok(Self { client, sliding_sync, @@ -478,11 +508,15 @@ impl RoomListService { /// properties, so that they are exactly the same. fn configure_all_or_visible_rooms_list( list_builder: SlidingSyncListBuilder, + #[cfg(feature = "experimental-room-list-with-unified-invites")] with_invites: bool, ) -> SlidingSyncListBuilder { + #[cfg(not(feature = "experimental-room-list-with-unified-invites"))] + let with_invites = false; + list_builder .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .filters(Some(assign!(SyncRequestListFilters::default(), { - is_invite: Some(false), + is_invite: Some(with_invites), is_tombstoned: Some(false), not_room_types: vec!["m.space".to_owned()], }))) @@ -517,6 +551,9 @@ pub enum Error { #[error("An error occurred while initializing the timeline")] InitializingTimeline(#[source] EventCacheError), + + #[error("The attached event cache ran into an error")] + EventCache(#[from] EventCacheError), } /// An input for the [`RoomList`]' state machine. diff --git a/crates/matrix-sdk-ui/src/room_list_service/room.rs b/crates/matrix-sdk-ui/src/room_list_service/room.rs index 0ab3917b8af..5113ec6a56b 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/room.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/room.rs @@ -17,7 +17,7 @@ use std::{ops::Deref, sync::Arc}; use async_once_cell::OnceCell as AsyncOnceCell; -use matrix_sdk::{SlidingSync, SlidingSyncRoom}; +use matrix_sdk::{event_cache, SlidingSync, SlidingSyncRoom}; use ruma::{api::client::sync::sync_events::v4::RoomSubscription, RoomId}; use super::Error; @@ -159,13 +159,19 @@ impl Room { } /// Create a new [`TimelineBuilder`] with the default configuration. - pub async fn default_room_timeline_builder(&self) -> TimelineBuilder { - Timeline::builder(&self.inner.room) - .events( + pub async fn default_room_timeline_builder(&self) -> event_cache::Result { + // TODO we can remove this once the event cache handles his own cache. 
+ self.inner + .room + .client() + .event_cache() + .add_initial_events( + self.inner.room.room_id(), + self.inner.sliding_sync_room.timeline_queue().iter().cloned().collect(), self.inner.sliding_sync_room.prev_batch(), - self.inner.sliding_sync_room.timeline_queue(), ) - .await - .track_read_marker_and_receipts() + .await?; + + Ok(Timeline::builder(&self.inner.room).track_read_marker_and_receipts()) } } diff --git a/crates/matrix-sdk-ui/src/room_list_service/state.rs b/crates/matrix-sdk-ui/src/room_list_service/state.rs index 04faf879842..f2c1991d7e8 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/state.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/state.rs @@ -120,6 +120,8 @@ impl Action for AddVisibleRooms { (StateEventType::RoomEncryption, "".to_owned()), (StateEventType::RoomMember, "$LAZY".to_owned()), ]), + #[cfg(feature = "experimental-room-list-with-unified-invites")] + false, )) .await .map_err(Error::SlidingSync)?; diff --git a/crates/matrix-sdk-ui/src/sync_service.rs b/crates/matrix-sdk-ui/src/sync_service.rs index d86343a4c28..25b36baa6cc 100644 --- a/crates/matrix-sdk-ui/src/sync_service.rs +++ b/crates/matrix-sdk-ui/src/sync_service.rs @@ -435,6 +435,10 @@ pub struct SyncServiceBuilder { /// SDK client. client: Client, + /// Whether we want to unify `all_rooms` and `invites`. + #[cfg(feature = "experimental-room-list-with-unified-invites")] + with_unified_invites_in_room_list: bool, + /// Is the cross-process lock for the crypto store enabled? with_cross_process_lock: bool, @@ -445,7 +449,20 @@ pub struct SyncServiceBuilder { impl SyncServiceBuilder { fn new(client: Client) -> Self { - Self { client, with_cross_process_lock: false, identifier: "app".to_owned() } + Self { + client, + #[cfg(feature = "experimental-room-list-with-unified-invites")] + with_unified_invites_in_room_list: false, + with_cross_process_lock: false, + identifier: "app".to_owned(), + } + } + + #[cfg(feature = "experimental-room-list-with-unified-invites")] + pub fn with_unified_invites_in_room_list(mut self, with_unified_invites: bool) -> Self { + self.with_unified_invites_in_room_list = with_unified_invites; + + self } /// Enables the cross-process lock, if the sync service is being built in a @@ -475,8 +492,16 @@ impl SyncServiceBuilder { pub async fn build(self) -> Result { let encryption_sync_permit = Arc::new(AsyncMutex::new(EncryptionSyncPermit::new())); + #[cfg(not(feature = "experimental-room-list-with-unified-invites"))] let room_list = RoomListService::new(self.client.clone()).await?; + #[cfg(feature = "experimental-room-list-with-unified-invites")] + let room_list = RoomListService::new_with_unified_invites( + self.client.clone(), + self.with_unified_invites_in_room_list, + ) + .await?; + let encryption_sync = Arc::new( EncryptionSyncService::new( self.identifier, diff --git a/crates/matrix-sdk-ui/src/timeline/builder.rs b/crates/matrix-sdk-ui/src/timeline/builder.rs index a00b73ec16a..5c1b0e722f2 100644 --- a/crates/matrix-sdk-ui/src/timeline/builder.rs +++ b/crates/matrix-sdk-ui/src/timeline/builder.rs @@ -16,14 +16,16 @@ use std::{collections::BTreeSet, sync::Arc}; use eyeball::SharedObservable; use futures_util::{pin_mut, StreamExt}; -use imbl::Vector; -use matrix_sdk::{deserialized_responses::SyncTimelineEvent, executor::spawn, Room}; -use matrix_sdk_base::sync::JoinedRoomUpdate; +use matrix_sdk::{ + event_cache::{self, RoomEventCacheUpdate}, + executor::spawn, + Room, +}; use ruma::{ events::{receipt::ReceiptType, AnySyncTimelineEvent}, RoomVersionId, }; -use 
tokio::sync::{broadcast, mpsc, Notify}; +use tokio::sync::{broadcast, mpsc}; use tracing::{info, info_span, trace, warn, Instrument, Span}; #[cfg(feature = "e2e-encryption")] @@ -33,7 +35,7 @@ use super::{ queue::send_queued_messages, BackPaginationStatus, Timeline, TimelineDropHandle, }; -use crate::event_cache::{EventCache, RoomEventCacheUpdate}; +use crate::{timeline::inner::TimelineEnd, unable_to_decrypt_hook::UtdHookManager}; /// Builder that allows creating and configuring various parts of a /// [`Timeline`]. @@ -41,36 +43,28 @@ use crate::event_cache::{EventCache, RoomEventCacheUpdate}; #[derive(Debug)] pub struct TimelineBuilder { room: Room, - prev_token: Option, settings: TimelineInnerSettings, - event_cache: EventCache, + + /// An optional hook to call whenever we run into an unable-to-decrypt or a + /// late-decryption event. + unable_to_decrypt_hook: Option>, } impl TimelineBuilder { pub(super) fn new(room: &Room) -> Self { Self { room: room.clone(), - prev_token: None, settings: TimelineInnerSettings::default(), - event_cache: EventCache::new(room.client()), + unable_to_decrypt_hook: None, } } - /// Add initial events to the timeline. + /// Sets up a hook to catch unable-to-decrypt (UTD) events for the timeline + /// we're building. /// - /// TODO: remove this, the EventCache should hold the events data in the - /// first place, and we'd provide an existing EventCache to the - /// TimelineBuilder. - pub async fn events( - mut self, - prev_token: Option, - events: Vector, - ) -> Self { - self.prev_token = prev_token; - self.event_cache - .add_initial_events(self.room.room_id(), events.iter().cloned().collect()) - .await - .expect("room exists"); + /// If it was previously set before, will overwrite the previous one. + pub fn with_unable_to_decrypt_hook(mut self, hook: Arc) -> Self { + self.unable_to_decrypt_hook = Some(hook); self } @@ -128,19 +122,24 @@ impl TimelineBuilder { fields( room_id = ?self.room.room_id(), track_read_receipts = self.settings.track_read_receipts, - prev_token = self.prev_token, ) )] - pub async fn build(self) -> crate::event_cache::Result { - let Self { room, event_cache, prev_token, settings } = self; + pub async fn build(self) -> event_cache::Result { + let Self { room, settings, unable_to_decrypt_hook } = self; + + let client = room.client(); + let event_cache = client.event_cache(); + + // Subscribe the event cache to sync responses, in case we hadn't done it yet. 
+ event_cache.subscribe()?; - let (room_event_cache, event_cache_drop) = event_cache.for_room(room.room_id()).await?; + let (room_event_cache, event_cache_drop) = room.event_cache().await?; let (events, mut event_subscriber) = room_event_cache.subscribe().await?; let has_events = !events.is_empty(); let track_read_marker_and_receipts = settings.track_read_receipts; - let mut inner = TimelineInner::new(room).with_settings(settings); + let mut inner = TimelineInner::new(room, unable_to_decrypt_hook).with_settings(settings); if track_read_marker_and_receipts { inner.populate_initial_user_receipt(ReceiptType::Read).await; @@ -148,7 +147,7 @@ impl TimelineBuilder { } if has_events { - inner.add_initial_events(events, prev_token).await; + inner.add_events_at(events, TimelineEnd::Back { from_cache: true }).await; } if track_read_marker_and_receipts { inner.load_fully_read_event().await; @@ -157,9 +156,7 @@ impl TimelineBuilder { let room = inner.room(); let client = room.client(); - let sync_response_notify = Arc::new(Notify::new()); let room_update_join_handle = spawn({ - let sync_response_notify = sync_response_notify.clone(); let inner = inner.clone(); let span = @@ -190,39 +187,22 @@ impl TimelineBuilder { RoomEventCacheUpdate::Append { events, - prev_batch, account_data, ephemeral, ambiguity_changes, } => { trace!("Received new events"); - // XXX this timeline and the joined room updates are synthetic, until - // we get rid of `handle_joined_room_update` by adding all functionality - // back in the event cache, and replacing it with a simple + // TODO: (bnjbvr) account_data and ephemeral should be handled by the + // event cache, and we should replace this with a simple // `handle_add_events`. - let timeline = matrix_sdk_base::sync::Timeline { - limited: false, - prev_batch, - events, - }; - let update = JoinedRoomUpdate { - unread_notifications: Default::default(), - timeline, - state: Default::default(), - account_data, - ephemeral, - ambiguity_changes: Default::default(), - }; - inner.handle_joined_room_update(update).await; + inner.handle_sync_events(events, account_data, ephemeral).await; let member_ambiguity_changes = ambiguity_changes .values() .flat_map(|change| change.user_ids()) .collect::>(); inner.force_update_sender_profiles(&member_ambiguity_changes).await; - - sync_response_notify.notify_waiters(); } } } @@ -299,10 +279,9 @@ impl TimelineBuilder { let timeline = Timeline { inner, - back_pagination_mtx: Default::default(), back_pagination_status: SharedObservable::new(BackPaginationStatus::Idle), - sync_response_notify, msg_sender, + event_cache: room_event_cache, drop_handle: Arc::new(TimelineDropHandle { client, event_handler_handles: handles, diff --git a/crates/matrix-sdk-ui/src/timeline/event_handler.rs b/crates/matrix-sdk-ui/src/timeline/event_handler.rs index 247574ac643..8431ac289e2 100644 --- a/crates/matrix-sdk-ui/src/timeline/event_handler.rs +++ b/crates/matrix-sdk-ui/src/timeline/event_handler.rs @@ -249,7 +249,7 @@ impl<'a, 'o> TimelineEventHandler<'a, 'o> { state: &'a mut TimelineInnerStateTransaction<'o>, ctx: TimelineEventContext, ) -> Self { - let TimelineInnerStateTransaction { items, meta } = state; + let TimelineInnerStateTransaction { items, meta, .. 
} = state; Self { items, meta, ctx, result: HandleEventResult::default() } } @@ -297,6 +297,14 @@ impl<'a, 'o> TimelineEventHandler<'a, 'o> { AnyMessageLikeEventContent::RoomEncrypted(c) => { // TODO: Handle replacements if the replaced event is also UTD self.add(true, TimelineItemContent::unable_to_decrypt(c)); + + // Let the hook know that we ran into an unable-to-decrypt that is added to the + // timeline. + if let Some(hook) = self.meta.unable_to_decrypt_hook.as_ref() { + if let Flow::Remote { event_id, .. } = &self.ctx.flow { + hook.on_utd(event_id); + } + } } AnyMessageLikeEventContent::Sticker(content) => { self.add(should_add, TimelineItemContent::Sticker(Sticker { content })); @@ -309,6 +317,10 @@ impl<'a, 'o> TimelineEventHandler<'a, 'o> { ) => self.handle_poll_start(c, should_add), AnyMessageLikeEventContent::UnstablePollResponse(c) => self.handle_poll_response(c), AnyMessageLikeEventContent::UnstablePollEnd(c) => self.handle_poll_end(c), + AnyMessageLikeEventContent::CallInvite(_) => { + self.add(should_add, TimelineItemContent::CallInvite); + } + // TODO _ => { debug!( diff --git a/crates/matrix-sdk-ui/src/timeline/event_item/content/mod.rs b/crates/matrix-sdk-ui/src/timeline/event_item/content/mod.rs index 90de9430ee6..06e655209e2 100644 --- a/crates/matrix-sdk-ui/src/timeline/event_item/content/mod.rs +++ b/crates/matrix-sdk-ui/src/timeline/event_item/content/mod.rs @@ -19,6 +19,7 @@ use imbl::Vector; use matrix_sdk_base::latest_event::{is_suitable_for_latest_event, PossibleLatestEvent}; use ruma::{ events::{ + call::invite::SyncCallInviteEvent, policy::rule::{ room::PolicyRuleRoomEventContent, server::PolicyRuleServerEventContent, user::PolicyRuleUserEventContent, @@ -106,6 +107,9 @@ pub enum TimelineItemContent { /// An `m.poll.start` event. Poll(PollState), + + /// An `m.call.invite` event + CallInvite, } impl TimelineItemContent { @@ -122,6 +126,9 @@ impl TimelineItemContent { PossibleLatestEvent::YesPoll(poll) => { Some(Self::from_suitable_latest_poll_event_content(poll)) } + PossibleLatestEvent::YesCallInvite(call_invite) => { + Some(Self::from_suitable_latest_call_invite_content(call_invite)) + } PossibleLatestEvent::NoUnsupportedEventType => { // TODO: when we support state events in message previews, this will need change warn!("Found a state event cached as latest_event! ID={}", event.event_id()); @@ -189,6 +196,15 @@ impl TimelineItemContent { } } + fn from_suitable_latest_call_invite_content( + event: &SyncCallInviteEvent, + ) -> TimelineItemContent { + match event { + SyncCallInviteEvent::Original(_) => TimelineItemContent::CallInvite, + SyncCallInviteEvent::Redacted(_) => TimelineItemContent::RedactedMessage, + } + } + /// If `self` is of the [`Message`][Self::Message] variant, return the inner /// [`Message`]. pub fn as_message(&self) -> Option<&Message> { @@ -228,6 +244,7 @@ impl TimelineItemContent { TimelineItemContent::FailedToParseMessageLike { .. } | TimelineItemContent::FailedToParseState { .. 
} => "an event that couldn't be parsed", TimelineItemContent::Poll(_) => "a poll", + TimelineItemContent::CallInvite => "a call invite", } } @@ -306,6 +323,7 @@ impl TimelineItemContent { | Self::RedactedMessage | Self::Sticker(_) | Self::Poll(_) + | Self::CallInvite | Self::UnableToDecrypt(_) => Self::RedactedMessage, Self::MembershipChange(ev) => Self::MembershipChange(ev.redact(room_version)), Self::ProfileChange(ev) => Self::ProfileChange(ev.redact()), diff --git a/crates/matrix-sdk-ui/src/timeline/event_item/mod.rs b/crates/matrix-sdk-ui/src/timeline/event_item/mod.rs index 60ef4e75837..3e52d3fe655 100644 --- a/crates/matrix-sdk-ui/src/timeline/event_item/mod.rs +++ b/crates/matrix-sdk-ui/src/timeline/event_item/mod.rs @@ -507,15 +507,14 @@ pub enum EventItemOrigin { mod tests { use assert_matches::assert_matches; use assert_matches2::assert_let; - use matrix_sdk::{config::RequestConfig, Client, ClientBuilder}; + use matrix_sdk::test_utils::logged_in_client; use matrix_sdk_base::{ - deserialized_responses::SyncTimelineEvent, latest_event::LatestEvent, BaseClient, - MinimalStateEvent, OriginalMinimalStateEvent, SessionMeta, + deserialized_responses::SyncTimelineEvent, latest_event::LatestEvent, MinimalStateEvent, + OriginalMinimalStateEvent, }; use matrix_sdk_test::{async_test, sync_timeline_event}; use ruma::{ - api::{client::sync::sync_events::v4, MatrixVersion}, - device_id, + api::client::sync::sync_events::v4, events::{ room::{ member::RoomMemberEventContent, @@ -697,28 +696,4 @@ mod tests { }) .into() } - - /// Copied from matrix_sdk_base::sliding_sync::test - async fn logged_in_client(homeserver_url: Option) -> Client { - let base_client = BaseClient::new(); - base_client - .set_session_meta(SessionMeta { - user_id: user_id!("@u:e.uk").to_owned(), - device_id: device_id!("XYZ").to_owned(), - }) - .await - .expect("Failed to set session meta"); - - test_client_builder(homeserver_url) - .request_config(RequestConfig::new().disable_retry()) - .base_client(base_client) - .build() - .await - .unwrap() - } - - fn test_client_builder(homeserver_url: Option) -> ClientBuilder { - let homeserver = homeserver_url.as_deref().unwrap_or("http://localhost:1234"); - Client::builder().homeserver_url(homeserver).server_versions([MatrixVersion::V1_0]) - } } diff --git a/crates/matrix-sdk-ui/src/timeline/inner/mod.rs b/crates/matrix-sdk-ui/src/timeline/inner/mod.rs index 3f33bb02860..f4c5210fa59 100644 --- a/crates/matrix-sdk-ui/src/timeline/inner/mod.rs +++ b/crates/matrix-sdk-ui/src/timeline/inner/mod.rs @@ -24,11 +24,7 @@ use imbl::Vector; use itertools::Itertools; #[cfg(all(test, feature = "e2e-encryption"))] use matrix_sdk::crypto::OlmMachine; -use matrix_sdk::{ - deserialized_responses::{SyncTimelineEvent, TimelineEvent}, - sync::JoinedRoomUpdate, - Error, Result, Room, -}; +use matrix_sdk::{deserialized_responses::SyncTimelineEvent, Error, Result, Room}; #[cfg(test)] use ruma::events::receipt::ReceiptEventContent; #[cfg(all(test, feature = "e2e-encryption"))] @@ -45,9 +41,10 @@ use ruma::{ message::{MessageType, Relation}, redaction::RoomRedactionEventContent, }, - AnyMessageLikeEventContent, AnySyncMessageLikeEvent, AnySyncTimelineEvent, - MessageLikeEventType, + AnyMessageLikeEventContent, AnyRoomAccountDataEvent, AnySyncEphemeralRoomEvent, + AnySyncMessageLikeEvent, AnySyncTimelineEvent, MessageLikeEventType, }, + serde::Raw, EventId, OwnedEventId, OwnedTransactionId, RoomVersionId, TransactionId, UserId, }; use tokio::sync::{RwLock, RwLockWriteGuard}; @@ -59,19 +56,18 @@ use 
tracing::{field, info_span, Instrument as _}; use super::traits::Decryptor; use super::{ event_item::EventItemIdentifier, - pagination::PaginationTokens, reactions::ReactionToggleResult, traits::RoomDataProvider, util::{rfind_event_by_id, rfind_event_item, RelativePosition}, AnnotationKey, EventSendState, EventTimelineItem, InReplyToDetails, Message, Profile, RepliedToEvent, TimelineDetails, TimelineItem, TimelineItemContent, TimelineItemKind, }; -use crate::timeline::TimelineEventFilterFn; +use crate::{timeline::TimelineEventFilterFn, unable_to_decrypt_hook::UtdHookManager}; mod state; pub(super) use self::state::{ - EventMeta, FullEventMeta, TimelineInnerMetadata, TimelineInnerState, + EventMeta, FullEventMeta, TimelineEnd, TimelineInnerMetadata, TimelineInnerState, TimelineInnerStateTransaction, }; @@ -193,6 +189,7 @@ pub fn default_event_filter(event: &AnySyncTimelineEvent, room_version: &RoomVer | AnyMessageLikeEventContent::UnstablePollStart( UnstablePollStartEventContent::New(_), ) + | AnyMessageLikeEventContent::CallInvite(_) | AnyMessageLikeEventContent::RoomEncrypted(_) => true, _ => false, @@ -209,8 +206,12 @@ pub fn default_event_filter(event: &AnySyncTimelineEvent, room_version: &RoomVer } impl TimelineInner
<P>
{ - pub(super) fn new(room_data_provider: P) -> Self { - let state = TimelineInnerState::new(room_data_provider.room_version()); + pub(super) fn new( + room_data_provider: P, + unable_to_decrypt_hook: Option>, + ) -> Self { + let state = + TimelineInnerState::new(room_data_provider.room_version(), unable_to_decrypt_hook); Self { state: Arc::new(RwLock::new(state)), room_data_provider, @@ -298,7 +299,8 @@ impl TimelineInner
<P>
{ (None, None) => { // No record of the reaction, create a local echo - let in_flight = state.in_flight_reaction.get::(&annotation.into()); + let in_flight = + state.meta.in_flight_reaction.get::(&annotation.into()); let txn_id = match in_flight { Some(ReactionState::Sending(txn_id)) => { // Use the transaction ID as the in flight request @@ -342,10 +344,10 @@ impl TimelineInner
<P>
{ } }; - state.reaction_state.insert(annotation.into(), reaction_state.clone()); + state.meta.reaction_state.insert(annotation.into(), reaction_state.clone()); // Check the action to perform depending on any in flight request - let in_flight = state.in_flight_reaction.get::(&annotation.into()); + let in_flight = state.meta.in_flight_reaction.get::(&annotation.into()); let result = match in_flight { Some(_) => { // There is an in-flight request @@ -370,7 +372,7 @@ impl TimelineInner
<P>
{ ReactionAction::None => {} ReactionAction::SendRemote(_) | ReactionAction::RedactRemote(_) => { // Remember the new in flight request - state.in_flight_reaction.insert(annotation.into(), reaction_state); + state.meta.in_flight_reaction.insert(annotation.into(), reaction_state); } }; @@ -396,7 +398,7 @@ impl TimelineInner
<P>
{ } if let Some(read_receipt) = read_receipt { - self.state.write().await.read_receipts.upsert_latest( + self.state.write().await.meta.read_receipts.upsert_latest( own_user_id, receipt_type, read_receipt, @@ -404,39 +406,55 @@ impl TimelineInner
<P>
{ } } - pub(super) async fn add_initial_events( - &mut self, - events: Vec, - back_pagination_token: Option, - ) { + /// Handle a list of events at the given end of the timeline. + /// + /// Returns the number of timeline updates that were made. + pub(super) async fn add_events_at( + &self, + events: Vec>, + position: TimelineEnd, + ) -> HandleManyEventsResult { if events.is_empty() { - return; + return Default::default(); } let mut state = self.state.write().await; - state - .add_initial_events( - events, - back_pagination_token, - &self.room_data_provider, - &self.settings, - ) - .await; + state.add_events_at(events, position, &self.room_data_provider, &self.settings).await } pub(super) async fn clear(&self) { self.state.write().await.clear(); } - pub(super) async fn handle_joined_room_update(&self, update: JoinedRoomUpdate) { + pub(super) async fn handle_sync_events( + &self, + events: Vec, + account_data: Vec>, + ephemeral: Vec>, + ) { let mut state = self.state.write().await; - state.handle_joined_room_update(update, &self.room_data_provider, &self.settings).await; + state + .handle_sync_events( + events, + account_data, + ephemeral, + &self.room_data_provider, + &self.settings, + ) + .await; } #[cfg(test)] pub(super) async fn handle_live_event(&self, event: SyncTimelineEvent) { let mut state = self.state.write().await; - state.handle_live_event(event, &self.room_data_provider, &self.settings).await; + state + .add_events_at( + vec![event], + TimelineEnd::Back { from_cache: false }, + &self.room_data_provider, + &self.settings, + ) + .await; } /// Handle the creation of a new local event. @@ -585,6 +603,7 @@ impl TimelineInner
<P>
{ let annotation_key: AnnotationKey = annotation.into(); let reaction_state = state + .meta .reaction_state .get(&AnnotationKey::from(annotation)) .expect("Reaction state should be set before sending the reaction"); @@ -593,6 +612,7 @@ impl TimelineInner
<P>
{ (ReactionToggleResult::AddSuccess { event_id, .. }, ReactionState::Redacting(_)) => { // A reaction was added successfully but we've been requested to undo it state + .meta .in_flight_reaction .insert(annotation_key, ReactionState::Redacting(Some(event_id.to_owned()))); ReactionAction::RedactRemote(event_id.to_owned()) @@ -601,14 +621,15 @@ impl TimelineInner
<P>
{ // A reaction was redacted successfully but we've been requested to undo it let txn_id = txn_id.to_owned(); state + .meta .in_flight_reaction .insert(annotation_key, ReactionState::Sending(txn_id.clone())); ReactionAction::SendRemote(txn_id) } _ => { // We're done, so also update the timeline - state.in_flight_reaction.swap_remove(&annotation_key); - state.reaction_state.swap_remove(&annotation_key); + state.meta.in_flight_reaction.swap_remove(&annotation_key); + state.meta.reaction_state.swap_remove(&annotation_key); state.update_timeline_reaction(user_id, annotation, result)?; ReactionAction::None @@ -672,52 +693,6 @@ impl TimelineInner
<P>
{ } } - /// Get the back-pagination token of the first [`EventTimelineItem`]. - /// - /// Returns `None` if there are no `EventTimelineItem`s, or the first one - /// doesn't have a back-pagination token. - pub(super) async fn back_pagination_token(&self) -> Option { - let state = self.state.read().await; - Some(state.back_pagination_token()?.to_owned()) - } - - /// Handle a list of back-paginated events. - /// - /// Returns the number of timeline updates that were made. Short-circuits - /// and returns `None` if the number of items added or updated exceeds - /// `u16::MAX`, which should practically never happen. - /// - /// # Arguments - /// - /// * `events` - The events from back-pagination - /// - /// * `back_pagination_token` - The back-pagination token for loading - /// further events - pub(super) async fn handle_back_paginated_events( - &self, - events: Vec, - pagination_tokens: PaginationTokens, - ) -> Result { - let mut state = self.state.write().await; - if pagination_tokens.check_from { - if let Some(token) = pagination_tokens.from { - if state.back_pagination_token() != Some(&token) { - return Err(HandleBackPaginatedEventsError::TokenMismatch); - } - } - } - - state - .handle_back_paginated_events( - events, - pagination_tokens.to, - &self.room_data_provider, - &self.settings, - ) - .await - .ok_or(HandleBackPaginatedEventsError::ResultOverflow) - } - pub(super) async fn set_fully_read_event(&self, fully_read_event_id: OwnedEventId) { self.state.write().await.set_fully_read_event(fully_read_event_id); } @@ -785,11 +760,13 @@ impl TimelineInner
<P>
{ let settings = self.settings.clone(); let room_data_provider = self.room_data_provider.clone(); let push_rules_context = room_data_provider.push_rules_and_context().await; + let unable_to_decrypt_hook = state.meta.unable_to_decrypt_hook.clone(); matrix_sdk::executor::spawn(async move { let retry_one = |item: Arc| { let decryptor = decryptor.clone(); let should_retry = &should_retry; + let unable_to_decrypt_hook = unable_to_decrypt_hook.clone(); async move { let event_item = item.as_event()?; @@ -823,6 +800,12 @@ impl TimelineInner
<P>
{ trace!( "Successfully decrypted event that previously failed to decrypt" ); + + // Notify observers that we managed to eventually decrypt an event. + if let Some(hook) = unable_to_decrypt_hook { + hook.on_late_decrypt(&remote_event.event_id); + } + Some(event) } Err(e) => { @@ -1101,7 +1084,7 @@ impl TimelineInner { match receipt_type { SendReceiptType::Read => { if let Some((old_pub_read, _)) = - state.user_receipt(own_user_id, ReceiptType::Read, room).await + state.meta.user_receipt(own_user_id, ReceiptType::Read, room).await { trace!(%old_pub_read, "found a previous public receipt"); if let Some(relative_pos) = @@ -1148,31 +1131,20 @@ impl TimelineInner { /// it's folded into another timeline item. pub(crate) async fn latest_event_id(&self) -> Option { let state = self.state.read().await; - state.all_events.back().map(|event_meta| &event_meta.event_id).cloned() + state.meta.all_events.back().map(|event_meta| &event_meta.event_id).cloned() } } #[derive(Debug, Default)] pub(super) struct HandleManyEventsResult { - pub items_added: u16, - pub items_updated: u16, - pub back_pagination_token_updated: bool, -} - -#[derive(Debug)] -pub(in crate::timeline) enum HandleBackPaginatedEventsError { - /// The `from` token is not equal to the first event item's back-pagination - /// token. + /// The number of items that were added to the timeline. /// - /// This means that prepending the events from the back-pagination response - /// would result in a gap in the timeline. Back-pagination must be retried - /// with the current back-pagination token. - TokenMismatch, + /// Note one can't assume anything about the position at which those were + /// added. + pub items_added: u64, - /// `u16` overflow when computing the number of events affected. - /// - /// This *should* never happen. - ResultOverflow, + /// The number of items that were updated in the timeline. + pub items_updated: u64, } async fn fetch_replied_to_event( @@ -1201,7 +1173,7 @@ async fn fetch_replied_to_event( }); let event_item = item.with_content(TimelineItemContent::Message(reply), None); - let new_timeline_item = state.new_timeline_item(event_item); + let new_timeline_item = state.meta.new_timeline_item(event_item); state.items.set(index, new_timeline_item); // Don't hold the state lock while the network request is made diff --git a/crates/matrix-sdk-ui/src/timeline/inner/state.rs b/crates/matrix-sdk-ui/src/timeline/inner/state.rs index 2e315208b7e..f84647bc1cf 100644 --- a/crates/matrix-sdk-ui/src/timeline/inner/state.rs +++ b/crates/matrix-sdk-ui/src/timeline/inner/state.rs @@ -12,18 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
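To tie the hook plumbing together: the `on_utd`/`on_late_decrypt` calls above only fire if a hook was attached when the timeline was built. A hedged sketch of that wiring for a room-list room, assuming the `unable_to_decrypt_hook` module is publicly re-exported from `matrix_sdk_ui` and that an `Arc<UtdHookManager>` is already at hand; the helper name is illustrative.

```rust
use std::sync::Arc;

use matrix_sdk::event_cache;
use matrix_sdk_ui::{
    room_list_service::Room, timeline::Timeline, unable_to_decrypt_hook::UtdHookManager,
};

/// Build a timeline that reports UTDs and late decryptions to `hook`.
async fn timeline_with_utd_hook(
    room: &Room,
    hook: Arc<UtdHookManager>,
) -> event_cache::Result<Timeline> {
    let builder = room.default_room_timeline_builder().await?;
    builder.with_unable_to_decrypt_hook(hook).build().await
}
```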
-use std::{ - collections::VecDeque, - future::Future, - mem::{self, ManuallyDrop}, - ops::{Deref, DerefMut}, - sync::Arc, -}; +use std::{collections::VecDeque, future::Future, sync::Arc}; use eyeball_im::{ObservableVector, ObservableVectorTransaction, ObservableVectorTransactionEntry}; use indexmap::IndexMap; -use matrix_sdk::{deserialized_responses::SyncTimelineEvent, sync::Timeline}; -use matrix_sdk_base::{deserialized_responses::TimelineEvent, sync::JoinedRoomUpdate}; +use matrix_sdk::deserialized_responses::SyncTimelineEvent; +use matrix_sdk_base::deserialized_responses::TimelineEvent; #[cfg(test)] use ruma::events::receipt::ReceiptEventContent; use ruma::{ @@ -32,6 +26,7 @@ use ruma::{ AnyMessageLikeEventContent, AnyRoomAccountDataEvent, AnySyncEphemeralRoomEvent, }, push::Action, + serde::Raw, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedTransactionId, OwnedUserId, RoomVersionId, UserId, }; @@ -54,8 +49,24 @@ use crate::{ AnnotationKey, Error as TimelineError, Profile, ReactionSenderData, TimelineItem, TimelineItemKind, VirtualTimelineItem, }, + unable_to_decrypt_hook::UtdHookManager, }; +/// Which end of the timeline should an event be added to? +/// +/// This is a simplification of `TimelineItemPosition` which doesn't contain the +/// `Update` variant, when adding a bunch of events at the same time. +#[derive(Debug)] +pub(crate) enum TimelineEnd { + /// Event should be prepended to the front of the timeline. + Front, + /// Event should appended to the back of the timeline. + Back { + /// Did the event come from the cache? + from_cache: bool, + }, +} + #[derive(Debug)] pub(in crate::timeline) struct TimelineInnerState { pub items: ObservableVector>, @@ -63,65 +74,61 @@ pub(in crate::timeline) struct TimelineInnerState { } impl TimelineInnerState { - pub(super) fn new(room_version: RoomVersionId) -> Self { + pub(super) fn new( + room_version: RoomVersionId, + unable_to_decrypt_hook: Option>, + ) -> Self { Self { // Upstream default capacity is currently 16, which is making // sliding-sync tests with 20 events lag. This should still be // small enough. items: ObservableVector::with_capacity(32), - meta: TimelineInnerMetadata::new(room_version), + meta: TimelineInnerMetadata::new(room_version, unable_to_decrypt_hook), } } - pub(super) fn back_pagination_token(&self) -> Option<&str> { - let (_, token) = self.meta.back_pagination_tokens.front()?; - Some(token) - } - - #[tracing::instrument(skip_all)] - pub(super) async fn add_initial_events( + /// Add the given events at the given end of the timeline. + #[tracing::instrument(skip(self, events, room_data_provider, settings))] + pub(super) async fn add_events_at( &mut self, - events: Vec, - mut back_pagination_token: Option, + events: Vec>, + position: TimelineEnd, room_data_provider: &P, settings: &TimelineInnerSettings, - ) { - debug!("Adding {} initial events", events.len()); + ) -> HandleManyEventsResult { + if events.is_empty() { + return Default::default(); + } let mut txn = self.transaction(); - for event in events { - let (event_id, _) = txn - .handle_remote_event( - event, - TimelineItemPosition::End { from_cache: true }, - room_data_provider, - settings, - ) - .await; - - // Back-pagination token, if any, is added to the first added event. 
- if let Some(event_id) = event_id { - if let Some(token) = back_pagination_token.take() { - trace!(token, ?event_id, "Adding back-pagination token to the back"); - txn.meta.back_pagination_tokens.push_back((event_id, token)); - } - } - } + let handle_many_res = + txn.add_events_at(events, position, room_data_provider, settings).await; txn.commit(); + + handle_many_res } #[instrument(skip_all)] - pub(super) async fn handle_joined_room_update( + pub(super) async fn handle_sync_events( &mut self, - update: JoinedRoomUpdate, + events: Vec, + account_data: Vec>, + ephemeral: Vec>, room_data_provider: &P, settings: &TimelineInnerSettings, ) { let mut txn = self.transaction(); - txn.handle_sync_timeline(update.timeline, room_data_provider, settings).await; + + txn.add_events_at( + events, + TimelineEnd::Back { from_cache: false }, + room_data_provider, + settings, + ) + .await; trace!("Handling account data"); - for raw_event in update.account_data { + for raw_event in account_data { match raw_event.deserialize() { Ok(AnyRoomAccountDataEvent::FullyRead(ev)) => { txn.set_fully_read_event(ev.content.event_id); @@ -134,10 +141,10 @@ impl TimelineInnerState { } } - if !update.ephemeral.is_empty() { + if !ephemeral.is_empty() { trace!("Handling ephemeral room events"); let own_user_id = room_data_provider.own_user_id(); - for raw_event in update.ephemeral { + for raw_event in ephemeral { match raw_event.deserialize() { Ok(AnySyncEphemeralRoomEvent::Receipt(ev)) => { txn.handle_explicit_read_receipts(ev.content, own_user_id); @@ -154,57 +161,6 @@ impl TimelineInnerState { txn.commit(); } - #[instrument(skip_all)] - pub(super) async fn handle_back_paginated_events( - &mut self, - events: Vec, - back_pagination_token: Option, - room_data_provider: &P, - settings: &TimelineInnerSettings, - ) -> Option { - let mut txn = self.transaction(); - - let mut latest_event_id = None; - let mut total = HandleManyEventsResult::default(); - for event in events { - let (event_id, res) = txn - .handle_remote_event( - event.into(), - TimelineItemPosition::Start, - room_data_provider, - settings, - ) - .await; - - latest_event_id = event_id.or(latest_event_id); - total.items_added = total.items_added.checked_add(res.item_added as u16)?; - total.items_updated = total.items_updated.checked_add(res.items_updated)?; - } - - // Back-pagination token, if any, is added to the last added event. - if let Some((event_id, token)) = latest_event_id.zip(back_pagination_token) { - trace!(token, ?event_id, "Adding back-pagination token to the front"); - txn.meta.back_pagination_tokens.push_front((event_id, token)); - total.back_pagination_token_updated = true; - } - - txn.commit(); - - Some(total) - } - - #[cfg(test)] - pub(super) async fn handle_live_event( - &mut self, - event: SyncTimelineEvent, - room_data_provider: &P, - settings: &TimelineInnerSettings, - ) { - let mut txn = self.transaction(); - txn.handle_live_event(event, room_data_provider, settings).await; - txn.commit(); - } - /// Handle the creation of a new local event. pub(super) fn handle_local_event( &mut self, @@ -301,7 +257,7 @@ impl TimelineInnerState { push_rules.get_actions(&event.event, push_context).to_owned() }); - let (_, result) = txn + let handle_one_res = txn .handle_remote_event( event.into(), TimelineItemPosition::Update(idx), @@ -312,7 +268,7 @@ impl TimelineInnerState { // If the UTD was removed rather than updated, offset all // subsequent loop iterations. 
- if result.item_removed { + if handle_one_res.item_removed { offset += 1; } } @@ -442,74 +398,53 @@ impl TimelineInnerState { } fn transaction(&mut self) -> TimelineInnerStateTransaction<'_> { - let items = ManuallyDrop::new(self.items.transaction()); - TimelineInnerStateTransaction { items, meta: &mut self.meta } + let items = self.items.transaction(); + let meta = self.meta.clone(); + TimelineInnerStateTransaction { items, previous_meta: &mut self.meta, meta } } } -impl Deref for TimelineInnerState { - type Target = TimelineInnerMetadata; - - fn deref(&self) -> &Self::Target { - &self.meta - } -} +pub(in crate::timeline) struct TimelineInnerStateTransaction<'a> { + /// A vector transaction over the items themselves. Holds temporary state + /// until committed. + pub items: ObservableVectorTransaction<'a, Arc>, -impl DerefMut for TimelineInnerState { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.meta - } -} + /// A clone of the previous meta, that we're operating on during the + /// transaction, and that will be committed to the previous meta location in + /// [`Self::commit`]. + pub meta: TimelineInnerMetadata, -pub(in crate::timeline) struct TimelineInnerStateTransaction<'a> { - pub items: ManuallyDrop>>, - pub meta: &'a mut TimelineInnerMetadata, + /// Pointer to the previous meta, only used during [`Self::commit`]. + previous_meta: &'a mut TimelineInnerMetadata, } impl TimelineInnerStateTransaction<'_> { - #[instrument(skip_all, fields(limited = timeline.limited))] - async fn handle_sync_timeline( + /// Add the given events at the given end of the timeline. + #[tracing::instrument(skip(self, events, room_data_provider, settings))] + pub(super) async fn add_events_at( &mut self, - mut timeline: Timeline, + events: Vec>, + position: TimelineEnd, room_data_provider: &P, settings: &TimelineInnerSettings, - ) { - if timeline.limited { - self.clear(); - } + ) -> HandleManyEventsResult { + let mut total = HandleManyEventsResult::default(); - let num_events = timeline.events.len(); - for (i, event) in timeline.events.into_iter().enumerate() { - trace!("Handling event {} out of {num_events}", i + 1); - let (event_id, _) = self.handle_live_event(event, room_data_provider, settings).await; + let position = match position { + TimelineEnd::Front => TimelineItemPosition::Start, + TimelineEnd::Back { from_cache } => TimelineItemPosition::End { from_cache }, + }; - // Back-pagination token, if any, is added to the first added event. - if let Some(event_id) = event_id { - if let Some(token) = timeline.prev_batch.take() { - trace!(token, ?event_id, "Adding back-pagination token to the back"); - self.meta.back_pagination_tokens.push_back((event_id, token)); - } - } + for event in events { + let handle_one_res = self + .handle_remote_event(event.into(), position, room_data_provider, settings) + .await; + + total.items_added += handle_one_res.item_added as u64; + total.items_updated += handle_one_res.items_updated as u64; } - } - /// Handle a live remote event. - /// - /// Shorthand for `handle_remote_event` with a `position` of - /// `TimelineItemPosition::End { from_cache: false }`. - async fn handle_live_event( - &mut self, - event: SyncTimelineEvent, - room_data_provider: &P, - settings: &TimelineInnerSettings, - ) -> (Option, HandleEventResult) { - self.handle_remote_event( - event, - TimelineItemPosition::End { from_cache: false }, - room_data_provider, - settings, - ) - .await + total } /// Handle a remote event. 
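The new transaction shape is simpler to reason about than the previous `ManuallyDrop`/`Deref` arrangement: the metadata is cloned up front, mutated freely, and only written back over the original on commit (shown in the next hunk), so a dropped transaction silently discards its changes. A minimal, self-contained sketch of that pattern with toy types, not the SDK's own:

```rust
#[derive(Clone, Default)]
struct Metadata {
    fully_read_event: Option<String>,
}

struct State {
    meta: Metadata,
}

struct Transaction<'a> {
    /// The clone we mutate during the transaction.
    meta: Metadata,
    /// Where the clone is written back on commit.
    previous_meta: &'a mut Metadata,
}

impl State {
    fn transaction(&mut self) -> Transaction<'_> {
        Transaction { meta: self.meta.clone(), previous_meta: &mut self.meta }
    }
}

impl Transaction<'_> {
    fn commit(self) {
        // Publish the mutated clone; dropping without `commit` discards it.
        *self.previous_meta = self.meta;
    }
}

fn example(state: &mut State) {
    let mut txn = state.transaction();
    txn.meta.fully_read_event = Some("$event_id".to_owned());
    txn.commit();
}
```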
@@ -521,7 +456,7 @@ impl TimelineInnerStateTransaction<'_> { position: TimelineItemPosition, room_data_provider: &P, settings: &TimelineInnerSettings, - ) -> (Option, HandleEventResult) { + ) -> HandleEventResult { let raw = event.event; let (event_id, sender, timestamp, txn_id, event_kind, should_add) = match raw.deserialize() { @@ -563,7 +498,7 @@ impl TimelineInnerStateTransaction<'_> { }; self.add_event(event_meta, position, room_data_provider, settings).await; - return (Some(event_id.to_owned()), HandleEventResult::default()); + return HandleEventResult::default(); } Err(e) => { @@ -589,7 +524,7 @@ impl TimelineInnerStateTransaction<'_> { self.add_event(event_meta, position, room_data_provider, settings).await; } - return (event_id, HandleEventResult::default()); + return HandleEventResult::default(); } }, }; @@ -615,7 +550,7 @@ impl TimelineInnerStateTransaction<'_> { read_receipts: if settings.track_read_receipts && should_add { self.meta.read_receipts.compute_event_receipts( &event_id, - &self.all_events, + &self.meta.all_events, matches!(position, TimelineItemPosition::End { .. }), ) } else { @@ -631,8 +566,7 @@ impl TimelineInnerStateTransaction<'_> { }, }; - let result = TimelineEventHandler::new(self, ctx).handle_event(event_kind); - (Some(event_id), result) + TimelineEventHandler::new(self, ctx).handle_event(event_kind) } fn clear(&mut self) { @@ -665,14 +599,13 @@ impl TimelineInnerStateTransaction<'_> { self.items.clear(); } - self.all_events.clear(); - self.read_receipts.clear(); - self.reactions.clear(); - self.fully_read_event = None; + self.meta.all_events.clear(); + self.meta.read_receipts.clear(); + self.meta.reactions.clear(); + self.meta.fully_read_event = None; // We forgot about the fully read marker right above, so wait for a new one // before attempting to update it for each new timeline item. - self.has_up_to_date_read_marker_item = true; - self.back_pagination_tokens.clear(); + self.meta.has_up_to_date_read_marker_item = true; debug!(remaining_items = self.items.len(), "Timeline cleared"); } @@ -680,24 +613,19 @@ impl TimelineInnerStateTransaction<'_> { #[instrument(skip_all)] fn set_fully_read_event(&mut self, fully_read_event_id: OwnedEventId) { // A similar event has been handled already. We can ignore it. - if self.fully_read_event.as_ref().is_some_and(|id| *id == fully_read_event_id) { + if self.meta.fully_read_event.as_ref().is_some_and(|id| *id == fully_read_event_id) { return; } - self.fully_read_event = Some(fully_read_event_id); + self.meta.fully_read_event = Some(fully_read_event_id); self.meta.update_read_marker(&mut self.items); } - fn commit(mut self) { - let Self { - items, - // meta is just a reference, does not any dropping - meta: _, - } = &mut self; + fn commit(self) { + let Self { items, previous_meta, meta } = self; - // Safety: self is forgotten to avoid double free from drop - let items = unsafe { ManuallyDrop::take(items) }; - mem::forget(self); + // Replace the pointer to the previous meta with the new one. + *previous_meta = meta; items.commit(); } @@ -710,21 +638,21 @@ impl TimelineInnerStateTransaction<'_> { settings: &TimelineInnerSettings, ) { match position { - TimelineItemPosition::Start => self.all_events.push_front(event_meta.base_meta()), + TimelineItemPosition::Start => self.meta.all_events.push_front(event_meta.base_meta()), TimelineItemPosition::End { .. } => { // Handle duplicated event. 
if let Some(pos) = - self.all_events.iter().position(|ev| ev.event_id == event_meta.event_id) + self.meta.all_events.iter().position(|ev| ev.event_id == event_meta.event_id) { - self.all_events.remove(pos); + self.meta.all_events.remove(pos); } - self.all_events.push_back(event_meta.base_meta()); + self.meta.all_events.push_back(event_meta.base_meta()); } #[cfg(feature = "e2e-encryption")] TimelineItemPosition::Update(_) => { if let Some(event) = - self.all_events.iter_mut().find(|e| e.event_id == event_meta.event_id) + self.meta.all_events.iter_mut().find(|e| e.event_id == event_meta.event_id) { if event.visible != event_meta.visible { event.visible = event_meta.visible; @@ -749,32 +677,7 @@ impl TimelineInnerStateTransaction<'_> { } } -impl Drop for TimelineInnerStateTransaction<'_> { - fn drop(&mut self) { - warn!("timeline state transaction cancelled"); - // Safety: self.items is not touched anymore, the only other place - // dropping is Self::commit which makes sure to skip this Drop impl. - unsafe { - ManuallyDrop::drop(&mut self.items); - } - } -} - -impl Deref for TimelineInnerStateTransaction<'_> { - type Target = TimelineInnerMetadata; - - fn deref(&self) -> &Self::Target { - self.meta - } -} - -impl DerefMut for TimelineInnerStateTransaction<'_> { - fn deref_mut(&mut self) -> &mut Self::Target { - self.meta - } -} - -#[derive(Debug)] +#[derive(Clone, Debug)] pub(in crate::timeline) struct TimelineInnerMetadata { /// List of all the events as received in the timeline, even the ones that /// are discarded in the timeline items. @@ -799,17 +702,18 @@ pub(in crate::timeline) struct TimelineInnerMetadata { pub reaction_state: IndexMap, /// The in-flight reaction request state that is ongoing. pub in_flight_reaction: IndexMap, - pub room_version: RoomVersionId, - /// Back-pagination tokens, in the same order as the associated timeline - /// items. - /// - /// Private because it's not needed by `TimelineEventHandler`. - back_pagination_tokens: VecDeque<(OwnedEventId, String)>, + /// The hook to call whenever we run into a unable-to-decrypt event. + pub(crate) unable_to_decrypt_hook: Option>, + + pub room_version: RoomVersionId, } impl TimelineInnerMetadata { - fn new(room_version: RoomVersionId) -> TimelineInnerMetadata { + fn new( + room_version: RoomVersionId, + unable_to_decrypt_hook: Option>, + ) -> Self { Self { all_events: Default::default(), next_internal_id: Default::default(), @@ -823,7 +727,7 @@ impl TimelineInnerMetadata { reaction_state: Default::default(), in_flight_reaction: Default::default(), room_version, - back_pagination_tokens: VecDeque::new(), + unable_to_decrypt_hook, } } diff --git a/crates/matrix-sdk-ui/src/timeline/mod.rs b/crates/matrix-sdk-ui/src/timeline/mod.rs index e7a87f158c2..85a50022a18 100644 --- a/crates/matrix-sdk-ui/src/timeline/mod.rs +++ b/crates/matrix-sdk-ui/src/timeline/mod.rs @@ -16,7 +16,7 @@ //! //! See [`Timeline`] for details. 
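The duplicate handling above boils down to: when an event is appended again, drop its earlier entry so the newest position wins. A tiny, self-contained illustration of that policy, with toy types rather than the SDK's `EventMeta`:

```rust
use std::collections::VecDeque;

/// Append `event_id`, removing any previous occurrence so it ends up at the back.
fn push_deduplicated(all_events: &mut VecDeque<String>, event_id: String) {
    if let Some(pos) = all_events.iter().position(|id| *id == event_id) {
        all_events.remove(pos);
    }
    all_events.push_back(event_id);
}
```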
-use std::{ops::ControlFlow, pin::Pin, sync::Arc, task::Poll};
+use std::{pin::Pin, sync::Arc, task::Poll};
 
 use eyeball::{SharedObservable, Subscriber};
 use eyeball_im::VectorDiff;
@@ -24,6 +24,7 @@ use futures_core::Stream;
 use imbl::Vector;
 use matrix_sdk::{
     attachment::AttachmentConfig,
+    event_cache::{EventCacheDropHandles, RoomEventCache},
     event_handler::EventHandlerHandle,
     executor::JoinHandle,
     room::{Receipts, Room},
@@ -55,11 +56,10 @@ use ruma::{
     TransactionId, UserId,
 };
 use thiserror::Error;
-use tokio::sync::{mpsc::Sender, Mutex, Notify};
-use tracing::{debug, error, info, instrument, trace, warn};
+use tokio::sync::mpsc::Sender;
+use tracing::{debug, error, instrument, trace, warn};
 
 use self::futures::SendAttachment;
-use crate::event_cache::EventCacheDropHandles;
 
 mod builder;
 mod error;
@@ -116,17 +116,21 @@ use self::{
 /// messages.
 #[derive(Debug)]
 pub struct Timeline {
+    /// Cloneable, inner fields of the `Timeline`, shared with some background
+    /// tasks.
     inner: TimelineInner,
 
-    /// Mutex that ensures only a single pagination is running at once
-    back_pagination_mtx: Mutex<()>,
+    /// The event cache specialized for this room's view.
+    event_cache: RoomEventCache,
 
+    /// Observable for whether a pagination is currently running
     back_pagination_status: SharedObservable<BackPaginationStatus>,
 
-    /// Notifier for handled sync responses.
-    sync_response_notify: Arc<Notify>,
-
+    /// A sender to the task whose responsibility is to send messages to the
+    /// current room.
     msg_sender: Sender,
+
+    /// References to long-running tasks held by the timeline.
     drop_handle: Arc,
 }
 
@@ -163,37 +167,6 @@ impl Timeline {
         self.back_pagination_status.subscribe()
     }
 
-    /// Add more events to the start of the timeline.
-    #[instrument(skip_all, fields(room_id = ?self.room().room_id(), ?options))]
-    pub async fn paginate_backwards(&self, options: PaginationOptions<'_>) -> Result<()> {
-        if self.back_pagination_status.get() == BackPaginationStatus::TimelineStartReached {
-            warn!("Start of timeline reached, ignoring backwards-pagination request");
-            return Ok(());
-        }
-
-        // Ignore extra back pagination requests if one is already running.
-        let Ok(_guard) = self.back_pagination_mtx.try_lock() else {
-            info!("Couldn't acquire pack pagination mutex, another request must be running");
-            return Ok(());
-        };
-
-        loop {
-            match self.paginate_backwards_impl(options.clone()).await {
-                Ok(ControlFlow::Continue(())) => {
-                    // fall through and continue the loop
-                }
-                Ok(ControlFlow::Break(status)) => {
-                    self.back_pagination_status.set_if_not_eq(status);
-                    return Ok(());
-                }
-                Err(e) => {
-                    self.back_pagination_status.set_if_not_eq(BackPaginationStatus::Idle);
-                    return Err(e);
-                }
-            }
-        }
-    }
-
     /// Retry decryption of previously un-decryptable events given a list of
     /// session IDs whose keys have been imported.
     ///
@@ -598,6 +571,9 @@ impl Timeline {
             TimelineItemContent::Poll(poll_state) => AnyMessageLikeEventContent::UnstablePollStart(
                 UnstablePollStartEventContent::New(poll_state.into()),
             ),
+            TimelineItemContent::CallInvite => {
+                error_return!("Retrying call events is not currently supported");
+            }
         };
 
         debug!("Retrying failed local echo");
diff --git a/crates/matrix-sdk-ui/src/timeline/pagination.rs b/crates/matrix-sdk-ui/src/timeline/pagination.rs
index fd5f5c422ec..dcb959861cb 100644
--- a/crates/matrix-sdk-ui/src/timeline/pagination.rs
+++ b/crates/matrix-sdk-ui/src/timeline/pagination.rs
@@ -12,186 +12,91 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
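// Illustrative sketch (not part of the patch): the `pagination.rs` rewrite that this hunk
// begins moves back-pagination onto the event cache, with `Timeline::paginate_backwards` as
// the public entry point. Assuming the crate's `PaginationOptions`, `BackPaginationStatus`
// and a status subscriber on `Timeline` as used elsewhere in this diff (the exact import
// paths and the `back_pagination_status()` accessor are assumptions, not confirmed here),
// a caller could drive it roughly like this:

use futures_util::StreamExt as _;
use matrix_sdk_ui::timeline::{BackPaginationStatus, PaginationOptions, Timeline};

async fn load_older_messages(timeline: &Timeline) -> anyhow::Result<()> {
    // Request batches of up to 20 events until 20 timeline items have been added,
    // or until the start of the timeline is reached.
    timeline.paginate_backwards(PaginationOptions::until_num_items(20, 20)).await?;

    // The observable status tells us whether the start of the room history was reached.
    let mut status_stream = timeline.back_pagination_status();
    if let Some(status) = status_stream.next().await {
        if status == BackPaginationStatus::TimelineStartReached {
            println!("reached the start of the timeline");
        }
    }

    Ok(())
}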
-use std::{fmt, ops::ControlFlow, pin::pin, sync::Arc, time::Duration}; +use std::{fmt, ops::ControlFlow, sync::Arc, time::Duration}; -use matrix_sdk::{room::MessagesOptions, Result}; -use matrix_sdk_base::timeout::timeout; -use ruma::assign; -use tracing::{error, info, instrument, trace, warn}; +use matrix_sdk::event_cache::{self, BackPaginationOutcome}; +use tracing::{instrument, trace, warn}; -use super::{inner::HandleBackPaginatedEventsError, Timeline}; +use crate::timeline::inner::TimelineEnd; -impl Timeline { - /// Run back-pagination. - /// - /// Returns `Ok(ControlFlow::Continue(()))` if back-pagination should be - /// retried because the timeline was reset while a pagination request was - /// in-flight. - /// - /// Returns `Ok(ControlFlow::Break(status))` if back-pagination succeeded, - /// where `status` is the resulting back-pagination status, either - /// [`Idle`][BackPaginationStatus::Idle] or - /// [`TimelineStartReached`][BackPaginationStatus::TimelineStartReached]. - /// - /// Returns `Err(_)` if the a pagination request failed. This doesn't mean - /// that no events were added to the timeline though, it is possible that - /// one or more pagination requests succeeded before the failure. - pub(super) async fn paginate_backwards_impl( +impl super::Timeline { + /// Add more events to the start of the timeline. + #[instrument(skip_all, fields(room_id = ?self.room().room_id(), ?options))] + pub async fn paginate_backwards( &self, mut options: PaginationOptions<'_>, - ) -> Result> { - // How long to wait for the back-pagination token to be set if the - // `wait_for_token` option is set + ) -> event_cache::Result<()> { + if self.back_pagination_status.get() == BackPaginationStatus::TimelineStartReached { + warn!("Start of timeline reached, ignoring backwards-pagination request"); + return Ok(()); + } + + if self.back_pagination_status.set_if_not_eq(BackPaginationStatus::Paginating).is_none() { + warn!("Another back-pagination is already running in the background"); + return Ok(()); + } + + // The first time, we allow to wait a bit for *a* back-pagination token to come + // over via sync. const WAIT_FOR_TOKEN_TIMEOUT: Duration = Duration::from_secs(3); - let mut from = match self.inner.back_pagination_token().await { - None if options.wait_for_token => { - trace!("Waiting for back-pagination token from sync..."); - - let wait_for_token = pin!(async { - loop { - self.sync_response_notify.notified().await; - match self.inner.back_pagination_token().await { - Some(token) => break token, - None => { - warn!( - "Sync response without prev_batch received, \ - continuing to wait" - ); - // fall through and continue the loop - } + let mut token = + self.event_cache.oldest_backpagination_token(Some(WAIT_FOR_TOKEN_TIMEOUT)).await?; + + let initial_options = options.clone(); + let mut outcome = PaginationOutcome::default(); + + while let Some(batch_size) = options.next_event_limit(outcome) { + loop { + match self.event_cache.backpaginate_with_token(batch_size, token).await? 
{ + BackPaginationOutcome::Success { events, reached_start } => { + let num_events = events.len(); + trace!("Back-pagination succeeded with {num_events} events"); + + let handle_many_res = + self.inner.add_events_at(events, TimelineEnd::Front).await; + + if reached_start { + self.back_pagination_status + .set_if_not_eq(BackPaginationStatus::TimelineStartReached); + return Ok(()); } - } - }); - match timeout(wait_for_token, WAIT_FOR_TOKEN_TIMEOUT).await { - Ok(token) => Some(token), - Err(_) => { - warn!("Waiting for prev_batch token timed out after 3s"); - None - } - } - } - token => token, - }; + outcome.events_received = num_events as u64; + outcome.total_events_received += outcome.events_received; - self.back_pagination_status.set_if_not_eq(BackPaginationStatus::Paginating); + outcome.items_added = handle_many_res.items_added; + outcome.items_updated = handle_many_res.items_updated; + outcome.total_items_added += outcome.items_added; + outcome.total_items_updated += outcome.items_updated; - let mut outcome = PaginationOutcome::new(); - while let Some(limit) = options.next_event_limit(outcome) { - match self.paginate_backwards_until_new_token(limit, from, &mut outcome).await? { - PaginateBackwardsOnceResult::Success { from: None, .. } => { - trace!("Start of timeline was reached"); - return Ok(ControlFlow::Break(BackPaginationStatus::TimelineStartReached)); - } - PaginateBackwardsOnceResult::Success { from: Some(f), .. } => { - from = Some(f); - // fall through and continue the loop - } - PaginateBackwardsOnceResult::TokenMismatch => { - info!("Head of timeline was altered since pagination was started, resetting"); - return Ok(ControlFlow::Continue(())); - } - PaginateBackwardsOnceResult::ResultOverflow => { - error!("Received an excessive number of events, ending pagination"); - break; + if num_events == 0 { + // As an exceptional contract: if there were no events in the response, + // see if we had another back-pagination token, and retry the request. + token = self.event_cache.oldest_backpagination_token(None).await?; + continue; + } + } + + BackPaginationOutcome::UnknownBackpaginationToken => { + // The token has been lost. + // It's possible the timeline has been cleared; restart the whole + // back-pagination. + outcome = Default::default(); + options = initial_options.clone(); + } } - }; - } - Ok(ControlFlow::Break(BackPaginationStatus::Idle)) - } + // Retrieve the next earliest back-pagination token. + token = self.event_cache.oldest_backpagination_token(None).await?; - /// Do back-pagination requests until the back-pagination token is updated. - #[instrument(skip(self, outcome))] - async fn paginate_backwards_until_new_token( - &self, - limit: u16, - mut from: Option, - outcome: &mut PaginationOutcome, - ) -> Result { - let mut check_from = true; - loop { - match self.paginate_backwards_once(limit, from, check_from, outcome).await? { - PaginateBackwardsOnceResult::Success { - from: Some(f), - back_pagination_token_updated, - } if !back_pagination_token_updated => { - trace!("Back-pagination token not updated"); - from = Some(f); - check_from = false; - } - res => return Ok(res), + // Exit the inner loop, and ask for another limit. + break; } } - } - /// Do a single back-pagination request. - /// - /// Returns `Ok(ControlFlow::Continue(true))` if back-pagination should be - /// retried because the timeline was reset while a pagination request - /// was in-flight. 
- /// - /// Returns `Ok(ControlFlow::Break(status))` if back-pagination succeeded, - /// where `status` is the resulting back-pagination status, either - /// [`Idle`][BackPaginationStatus::Idle] or - /// [`TimelineStartReached`][BackPaginationStatus::TimelineStartReached]. - /// - /// Returns `Err(_)` if the a pagination request failed. This doesn't mean - /// that no events were added to the timeline though, it is possible that - /// one or more pagination requests succeeded before the failure. - async fn paginate_backwards_once( - &self, - limit: u16, - from: Option, - check_from: bool, - outcome: &mut PaginationOutcome, - ) -> Result { - trace!("Requesting messages"); - - let messages = self - .room() - .messages(assign!(MessagesOptions::backward(), { - from: from.clone(), - limit: limit.into(), - })) - .await?; - let chunk_len = messages.chunk.len(); - - let tokens = PaginationTokens { from, check_from, to: messages.end.clone() }; - let res = match self.inner.handle_back_paginated_events(messages.chunk, tokens).await { - Ok(result) => result, - Err(HandleBackPaginatedEventsError::TokenMismatch) => { - return Ok(PaginateBackwardsOnceResult::TokenMismatch); - } - Err(HandleBackPaginatedEventsError::ResultOverflow) => { - return Ok(PaginateBackwardsOnceResult::ResultOverflow); - } - }; - - // FIXME: Change to try block once stable - let mut update_outcome = || { - outcome.events_received = chunk_len.try_into().ok()?; - outcome.total_events_received = - outcome.total_events_received.checked_add(outcome.events_received)?; - - outcome.items_added = res.items_added; - outcome.items_updated = res.items_updated; - outcome.total_items_added = - outcome.total_items_added.checked_add(outcome.items_added)?; - outcome.total_items_updated = - outcome.total_items_updated.checked_add(outcome.items_updated)?; - - Some(()) - }; - - Ok(match update_outcome() { - Some(()) => PaginateBackwardsOnceResult::Success { - from: messages.end, - back_pagination_token_updated: res.back_pagination_token_updated, - }, - None => PaginateBackwardsOnceResult::ResultOverflow, - }) + self.back_pagination_status.set_if_not_eq(BackPaginationStatus::Idle); + Ok(()) } } @@ -265,7 +170,7 @@ impl<'a> PaginationOptions<'a> { event_limit_if_first.take() } PaginationOptionsInner::UntilNumItems { items, event_limit } => { - (pagination_outcome.total_items_added < *items).then_some(*event_limit) + (pagination_outcome.total_items_added < *items as u64).then_some(*event_limit) } PaginationOptionsInner::Custom { event_limit_if_first, strategy } => { event_limit_if_first.take().or_else(|| match strategy(pagination_outcome) { @@ -322,38 +227,25 @@ impl<'a> fmt::Debug for PaginationOptions<'a> { #[non_exhaustive] pub struct PaginationOutcome { /// The number of events received in last pagination response. - pub events_received: u16, + pub events_received: u64, /// The number of timeline items added by the last pagination response. - pub items_added: u16, + pub items_added: u64, /// The number of timeline items updated by the last pagination /// response. - pub items_updated: u16, + pub items_updated: u64, /// The number of events received by a `paginate_backwards` call so far. - pub total_events_received: u16, + pub total_events_received: u64, /// The total number of items added by a `paginate_backwards` call so /// far. - pub total_items_added: u16, + pub total_items_added: u64, /// The total number of items updated by a `paginate_backwards` call so /// far. 
- pub total_items_updated: u16, -} - -impl PaginationOutcome { - pub(super) fn new() -> Self { - Self { - events_received: 0, - items_added: 0, - items_updated: 0, - total_events_received: 0, - total_items_added: 0, - total_items_updated: 0, - } - } + pub total_items_updated: u64, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -364,37 +256,6 @@ pub enum BackPaginationStatus { TimelineStartReached, } -#[derive(Default)] -pub(super) struct PaginationTokens { - /// The `from` parameter of the pagination request. - pub from: Option, - /// Whether to check the `from` token against the latest back-pagination - /// token. - pub check_from: bool, - /// The `end` parameter of the pagination response. - pub to: Option, -} - -/// The `Ok` result of `paginate_backwards_once`. -enum PaginateBackwardsOnceResult { - /// Success, the items from the response were prepended. - Success { - /// The back-pagination token for the next batch. - from: Option, - /// Whether to back-pagination token was updated. - back_pagination_token_updated: bool, - }, - /// The `from` token is not equal to the first event item's back-pagination - /// token. - /// - /// This means that prepending the events from the back-pagination response - /// would result in a gap in the timeline. Back-pagination must be retried - /// with the current back-pagination token. - TokenMismatch, - /// Overflow in reporting the number of events / items processed. - ResultOverflow, -} - #[cfg(test)] mod tests { use std::{ @@ -416,7 +277,7 @@ mod tests { #[test] fn simple_request_limits() { let mut opts = PaginationOptions::simple_request(10); - let mut outcome = PaginationOutcome::new(); + let mut outcome = PaginationOutcome::default(); assert_eq!(opts.next_event_limit(outcome), Some(10)); bump_outcome(&mut outcome); @@ -426,7 +287,7 @@ mod tests { #[test] fn until_num_items_limits() { let mut opts = PaginationOptions::until_num_items(10, 10); - let mut outcome = PaginationOutcome::new(); + let mut outcome = PaginationOutcome::default(); assert_eq!(opts.next_event_limit(outcome), Some(10)); bump_outcome(&mut outcome); @@ -447,7 +308,7 @@ mod tests { ControlFlow::Break(()) } }); - let mut outcome = PaginationOutcome::new(); + let mut outcome = PaginationOutcome::default(); assert_eq!(opts.next_event_limit(outcome), Some(8)); bump_outcome(&mut outcome); diff --git a/crates/matrix-sdk-ui/src/timeline/polls.rs b/crates/matrix-sdk-ui/src/timeline/polls.rs index 15ac975cf42..23647c6b819 100644 --- a/crates/matrix-sdk-ui/src/timeline/polls.rs +++ b/crates/matrix-sdk-ui/src/timeline/polls.rs @@ -141,7 +141,7 @@ impl From for NewUnstablePollStartEventContent { /// Acts as a cache for poll response and poll end events handled before their /// start event has been handled. -#[derive(Debug, Default)] +#[derive(Clone, Debug, Default)] pub(super) struct PollPendingEvents { pub(super) pending_poll_responses: HashMap>, pub(super) pending_poll_ends: HashMap, diff --git a/crates/matrix-sdk-ui/src/timeline/reactions.rs b/crates/matrix-sdk-ui/src/timeline/reactions.rs index a0b3c893c19..37ff0e8d96c 100644 --- a/crates/matrix-sdk-ui/src/timeline/reactions.rs +++ b/crates/matrix-sdk-ui/src/timeline/reactions.rs @@ -33,7 +33,7 @@ pub struct ReactionSenderData { pub timestamp: MilliSecondsSinceUnixEpoch, } -#[derive(Debug, Default)] +#[derive(Clone, Debug, Default)] pub(super) struct Reactions { /// Reaction event / txn ID => sender and reaction data. 
pub(super) map: HashMap, diff --git a/crates/matrix-sdk-ui/src/timeline/read_receipts.rs b/crates/matrix-sdk-ui/src/timeline/read_receipts.rs index 06723096ec5..0b105d70820 100644 --- a/crates/matrix-sdk-ui/src/timeline/read_receipts.rs +++ b/crates/matrix-sdk-ui/src/timeline/read_receipts.rs @@ -404,6 +404,7 @@ impl TimelineInnerStateTransaction<'_> { receipt_type: ReceiptType::Read, receipt: &receipt, }; + self.meta.read_receipts.maybe_update_read_receipt( full_receipt, user_id == own_user_id, @@ -446,6 +447,7 @@ impl TimelineInnerStateTransaction<'_> { pub(super) fn maybe_update_read_receipts_of_prev_event(&mut self, event_id: &EventId) { // Find the previous visible event, if there is one. let Some(prev_event_meta) = self + .meta .all_events .iter() .rev() @@ -474,9 +476,9 @@ impl TimelineInnerStateTransaction<'_> { return; }; - let read_receipts = self.read_receipts.compute_event_receipts( + let read_receipts = self.meta.read_receipts.compute_event_receipts( &remote_prev_event_item.event_id, - &self.all_events, + &self.meta.all_events, false, ); @@ -500,14 +502,15 @@ impl TimelineInnerState { room_data_provider: &P, ) -> Option<(OwnedEventId, Receipt)> { let public_read_receipt = - self.user_receipt(user_id, ReceiptType::Read, room_data_provider).await; + self.meta.user_receipt(user_id, ReceiptType::Read, room_data_provider).await; let private_read_receipt = - self.user_receipt(user_id, ReceiptType::ReadPrivate, room_data_provider).await; + self.meta.user_receipt(user_id, ReceiptType::ReadPrivate, room_data_provider).await; // Let's assume that a private read receipt should be more recent than a public // read receipt, otherwise there's no point in the private read receipt, // and use it as default. match self + .meta .compare_optional_receipts(public_read_receipt.as_ref(), private_read_receipt.as_ref()) { Ordering::Greater => public_read_receipt, @@ -524,22 +527,23 @@ impl TimelineInnerState { ) -> Option { // We only need to use the local map, since receipts for known events are // already loaded from the store. - let public_read_receipt = self.read_receipts.get_latest(user_id, &ReceiptType::Read); + let public_read_receipt = self.meta.read_receipts.get_latest(user_id, &ReceiptType::Read); let private_read_receipt = - self.read_receipts.get_latest(user_id, &ReceiptType::ReadPrivate); + self.meta.read_receipts.get_latest(user_id, &ReceiptType::ReadPrivate); // Let's assume that a private read receipt should be more recent than a public // read receipt, otherwise there's no point in the private read receipt, // and use it as default. let (latest_receipt_id, _) = - match self.compare_optional_receipts(public_read_receipt, private_read_receipt) { + match self.meta.compare_optional_receipts(public_read_receipt, private_read_receipt) { Ordering::Greater => public_read_receipt?, Ordering::Less => private_read_receipt?, _ => unreachable!(), }; // Find the corresponding visible event. 
- self.all_events + self.meta + .all_events .iter() .rev() .skip_while(|ev| ev.event_id != *latest_receipt_id) diff --git a/crates/matrix-sdk-ui/src/timeline/sliding_sync_ext.rs b/crates/matrix-sdk-ui/src/timeline/sliding_sync_ext.rs index df20e2de074..3ac674210ea 100644 --- a/crates/matrix-sdk-ui/src/timeline/sliding_sync_ext.rs +++ b/crates/matrix-sdk-ui/src/timeline/sliding_sync_ext.rs @@ -44,12 +44,11 @@ impl SlidingSyncRoomExt for SlidingSyncRoom { #[cfg(test)] mod tests { use assert_matches::assert_matches; - use matrix_sdk::{config::RequestConfig, Client, ClientBuilder, SlidingSyncRoom}; - use matrix_sdk_base::{deserialized_responses::SyncTimelineEvent, BaseClient, SessionMeta}; + use matrix_sdk::{test_utils::logged_in_client, Client, SlidingSyncRoom}; + use matrix_sdk_base::deserialized_responses::SyncTimelineEvent; use matrix_sdk_test::async_test; use ruma::{ - api::{client::sync::sync_events::v4, MatrixVersion}, - device_id, + api::client::sync::sync_events::v4, events::room::message::{MessageFormat, MessageType}, room_id, serde::Raw, @@ -141,28 +140,4 @@ mod tests { response.rooms.insert(room_id.to_owned(), room); response } - - /// Copied from matrix_sdk_base::sliding_sync::test - async fn logged_in_client(homeserver_url: Option) -> Client { - let base_client = BaseClient::new(); - base_client - .set_session_meta(SessionMeta { - user_id: user_id!("@u:e.uk").to_owned(), - device_id: device_id!("XYZ").to_owned(), - }) - .await - .expect("Failed to set session meta"); - - test_client_builder(homeserver_url) - .request_config(RequestConfig::new().disable_retry()) - .base_client(base_client) - .build() - .await - .unwrap() - } - - fn test_client_builder(homeserver_url: Option) -> ClientBuilder { - let homeserver = homeserver_url.as_deref().unwrap_or("http://localhost:1234"); - Client::builder().homeserver_url(homeserver).server_versions([MatrixVersion::V1_0]) - } } diff --git a/crates/matrix-sdk-ui/src/timeline/tests/basic.rs b/crates/matrix-sdk-ui/src/timeline/tests/basic.rs index c07c35e1677..6519849a052 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/basic.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/basic.rs @@ -34,18 +34,18 @@ use stream_assert::assert_next_matches; use super::TestTimeline; use crate::timeline::{ - event_item::AnyOtherFullStateEventContent, MembershipChange, TimelineDetails, - TimelineItemContent, TimelineItemKind, VirtualTimelineItem, + event_item::AnyOtherFullStateEventContent, inner::TimelineEnd, MembershipChange, + TimelineDetails, TimelineItemContent, TimelineItemKind, VirtualTimelineItem, }; #[async_test] -async fn initial_events() { - let mut timeline = TestTimeline::new(); +async fn test_initial_events() { + let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; timeline .inner - .add_initial_events( + .add_events_at( vec![ SyncTimelineEvent::new( timeline @@ -58,7 +58,7 @@ async fn initial_events() { .make_sync_message_event(*BOB, RoomMessageEventContent::text_plain("B")), ), ], - None, + TimelineEnd::Back { from_cache: false }, ) .await; @@ -71,7 +71,7 @@ async fn initial_events() { } #[async_test] -async fn sticker() { +async fn test_sticker() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe_events().await; @@ -99,7 +99,7 @@ async fn sticker() { } #[async_test] -async fn room_member() { +async fn test_room_member() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe_events().await; @@ -167,7 +167,7 @@ async fn room_member() { } #[async_test] -async fn other_state() { 
+async fn test_other_state() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; @@ -193,7 +193,7 @@ async fn other_state() { } #[async_test] -async fn dedup_pagination() { +async fn test_dedup_pagination() { let timeline = TestTimeline::new(); let event = timeline @@ -216,8 +216,8 @@ async fn dedup_pagination() { } #[async_test] -async fn dedup_initial() { - let mut timeline = TestTimeline::new(); +async fn test_dedup_initial() { + let timeline = TestTimeline::new(); let event_a = SyncTimelineEvent::new( timeline @@ -237,7 +237,7 @@ async fn dedup_initial() { timeline .inner - .add_initial_events( + .add_events_at( vec![ // two events event_a.clone(), @@ -248,7 +248,7 @@ async fn dedup_initial() { // … and a new event also came in event_c, ], - None, + TimelineEnd::Back { from_cache: false }, ) .await; @@ -272,7 +272,7 @@ async fn dedup_initial() { } #[async_test] -async fn sanitized() { +async fn test_sanitized() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; @@ -313,7 +313,7 @@ async fn sanitized() { } #[async_test] -async fn reply() { +async fn test_reply() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; @@ -369,7 +369,7 @@ async fn reply() { } #[async_test] -async fn thread() { +async fn test_thread() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; diff --git a/crates/matrix-sdk-ui/src/timeline/tests/edit.rs b/crates/matrix-sdk-ui/src/timeline/tests/edit.rs index bdf4cf45614..0b4fa4e376c 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/edit.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/edit.rs @@ -31,7 +31,7 @@ use super::TestTimeline; use crate::timeline::TimelineItemContent; #[async_test] -async fn live_redacted() { +async fn test_live_redacted() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; @@ -55,7 +55,7 @@ async fn live_redacted() { } #[async_test] -async fn live_sanitized() { +async fn test_live_sanitized() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; @@ -105,7 +105,7 @@ async fn live_sanitized() { } #[async_test] -async fn aggregated_sanitized() { +async fn test_aggregated_sanitized() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; diff --git a/crates/matrix-sdk-ui/src/timeline/tests/encryption.rs b/crates/matrix-sdk-ui/src/timeline/tests/encryption.rs index be2290e69a8..dd24b9c1e3a 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/encryption.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/encryption.rs @@ -14,7 +14,11 @@ #![cfg(not(target_arch = "wasm32"))] -use std::{io::Cursor, iter}; +use std::{ + io::Cursor, + iter, + sync::{Arc, Mutex}, +}; use assert_matches::assert_matches; use assert_matches2::assert_let; @@ -32,10 +36,13 @@ use ruma::{ use stream_assert::assert_next_matches; use super::TestTimeline; -use crate::timeline::{EncryptedMessage, TimelineItemContent}; +use crate::{ + timeline::{EncryptedMessage, TimelineItemContent}, + unable_to_decrypt_hook::{UnableToDecryptHook, UnableToDecryptInfo, UtdHookManager}, +}; #[async_test] -async fn retry_message_decryption() { +async fn test_retry_message_decryption() { const SESSION_ID: &str = "gM8i47Xhu0q52xLfgUXzanCMpLinoyVyH7R58cBuVBU"; const SESSION_KEY: &[u8] = b"\ -----BEGIN MEGOLM SESSION DATA-----\n\ @@ -51,7 +58,21 @@ async fn retry_message_decryption() { HztoSJUr/2Y\n\ -----END MEGOLM SESSION DATA-----"; - let timeline = TestTimeline::new(); + #[derive(Debug, 
Default)] + struct DummyUtdHook { + utds: Mutex>, + } + + impl UnableToDecryptHook for DummyUtdHook { + fn on_utd(&self, info: UnableToDecryptInfo) { + self.utds.lock().unwrap().push(info); + } + } + + let hook = Arc::new(DummyUtdHook::default()); + let utd_hook = Arc::new(UtdHookManager::new(hook.clone())); + + let timeline = TestTimeline::with_unable_to_decrypt_hook(utd_hook.clone()); let mut stream = timeline.subscribe().await; timeline @@ -92,6 +113,13 @@ async fn retry_message_decryption() { ); assert_eq!(session_id, SESSION_ID); + { + let utds = hook.utds.lock().unwrap(); + assert_eq!(utds.len(), 1); + assert_eq!(utds[0].event_id, event.event_id().unwrap()); + assert!(utds[0].time_to_decrypt.is_none()); + } + let own_user_id = user_id!("@example:morheus.localhost"); let exported_keys = decrypt_room_key_export(Cursor::new(SESSION_KEY), "1234").unwrap(); @@ -115,10 +143,23 @@ async fn retry_message_decryption() { assert_let!(TimelineItemContent::Message(message) = event.content()); assert_eq!(message.body(), "It's a secret to everybody"); assert!(!event.is_highlighted()); + + { + let utds = hook.utds.lock().unwrap(); + assert_eq!(utds.len(), 2); + + // The previous UTD report is still there. + assert_eq!(utds[0].event_id, event.event_id().unwrap()); + assert!(utds[0].time_to_decrypt.is_none()); + + // The UTD is now *also* reported as a late-decryption event. + assert_eq!(utds[1].event_id, event.event_id().unwrap()); + assert!(utds[1].time_to_decrypt.is_some()); + } } #[async_test] -async fn retry_edit_decryption() { +async fn test_retry_edit_decryption() { const SESSION1_KEY: &[u8] = b"\ -----BEGIN MEGOLM SESSION DATA-----\n\ AXou7bY+PWm0GrxTioyoKTkxAgfrQ5lGIla62WoBMrqWAAAACgXidLIt0gaK5NT3mGigzFAPjh/M0ibXjSvo\ @@ -224,7 +265,7 @@ async fn retry_edit_decryption() { } #[async_test] -async fn retry_edit_and_more() { +async fn test_retry_edit_and_more() { const DEVICE_ID: &str = "MTEGRRVPEN"; const SENDER_KEY: &str = "NFPM2+ucU3n3sEdbDdwwv48Bsj4AiQ185lGuRFjy+gs"; const SESSION_ID: &str = "SMNh04luorH5E8J3b4XYuOBFp8dldO5njacq0OFO70o"; @@ -329,7 +370,7 @@ async fn retry_edit_and_more() { } #[async_test] -async fn retry_message_decryption_highlighted() { +async fn test_retry_message_decryption_highlighted() { const SESSION_ID: &str = "C25PoE+4MlNidQD0YU5ibZqHawV0zZ/up7R8vYJBYTY"; const SESSION_KEY: &[u8] = b"\ -----BEGIN MEGOLM SESSION DATA-----\n\ diff --git a/crates/matrix-sdk-ui/src/timeline/tests/mod.rs b/crates/matrix-sdk-ui/src/timeline/tests/mod.rs index ba1a4a6d8bc..2d5714b6674 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/mod.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/mod.rs @@ -49,11 +49,12 @@ use ruma::{ use super::{ event_item::EventItemIdentifier, - inner::{ReactionAction, TimelineInnerSettings}, + inner::{ReactionAction, TimelineEnd, TimelineInnerSettings}, reactions::ReactionToggleResult, traits::RoomDataProvider, EventTimelineItem, Profile, TimelineInner, TimelineItem, }; +use crate::unable_to_decrypt_hook::UtdHookManager; mod basic; mod echo; @@ -62,7 +63,6 @@ mod edit; mod encryption; mod event_filter; mod invalid; -mod pagination; mod polls; mod reaction_group; mod reactions; @@ -81,7 +81,17 @@ impl TestTimeline { } fn with_room_data_provider(room_data_provider: TestRoomDataProvider) -> Self { - Self { inner: TimelineInner::new(room_data_provider), event_builder: EventBuilder::new() } + Self { + inner: TimelineInner::new(room_data_provider, None), + event_builder: EventBuilder::new(), + } + } + + fn with_unable_to_decrypt_hook(hook: Arc) -> Self { + Self { + 
inner: TimelineInner::new(TestRoomDataProvider::default(), Some(hook)), + event_builder: EventBuilder::new(), + } } fn with_settings(mut self, settings: TimelineInnerSettings) -> Self { @@ -233,10 +243,7 @@ impl TestTimeline { async fn handle_back_paginated_custom_event(&self, event: Raw) { let timeline_event = TimelineEvent::new(event.cast()); - self.inner - .handle_back_paginated_events(vec![timeline_event], Default::default()) - .await - .unwrap(); + self.inner.add_events_at(vec![timeline_event], TimelineEnd::Front).await; } async fn handle_read_receipts( diff --git a/crates/matrix-sdk-ui/src/timeline/tests/pagination.rs b/crates/matrix-sdk-ui/src/timeline/tests/pagination.rs deleted file mode 100644 index dd1f327be35..00000000000 --- a/crates/matrix-sdk-ui/src/timeline/tests/pagination.rs +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2023 Kévin Commaille -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use assert_matches2::assert_matches; -use matrix_sdk_base::deserialized_responses::TimelineEvent; -use matrix_sdk_test::async_test; -use ruma::serde::Raw; -use serde_json::json; - -use super::TestTimeline; -use crate::timeline::{inner::HandleBackPaginatedEventsError, pagination::PaginationTokens}; - -#[async_test] -async fn back_pagination_token_not_updated_with_empty_chunk() { - let timeline = TestTimeline::new(); - - timeline - .inner - .handle_back_paginated_events( - vec![], - PaginationTokens { from: None, check_from: true, to: Some("a".to_owned()) }, - ) - .await - .unwrap(); - - // Checking the token fails because it has not been updated. - let err = timeline - .inner - .handle_back_paginated_events( - vec![], - PaginationTokens { - from: Some("a".to_owned()), - check_from: true, - to: Some("b".to_owned()), - }, - ) - .await - .unwrap_err(); - assert_matches!(err, HandleBackPaginatedEventsError::TokenMismatch); - - // Not checking the token works. - timeline - .inner - .handle_back_paginated_events( - vec![], - PaginationTokens { - from: Some("a".to_owned()), - check_from: false, - to: Some("b".to_owned()), - }, - ) - .await - .unwrap(); -} - -#[async_test] -async fn back_pagination_token_not_updated_invalid_event() { - let timeline = TestTimeline::new(); - - // Invalid empty event. - let raw = Raw::new(&json!({})).unwrap(); - - timeline - .inner - .handle_back_paginated_events( - vec![TimelineEvent::new(raw.cast())], - PaginationTokens { from: None, check_from: true, to: Some("a".to_owned()) }, - ) - .await - .unwrap(); - - // Checking the token fails because it has not been updated. - let err = timeline - .inner - .handle_back_paginated_events( - vec![], - PaginationTokens { - from: Some("a".to_owned()), - check_from: true, - to: Some("b".to_owned()), - }, - ) - .await - .unwrap_err(); - assert_matches!(err, HandleBackPaginatedEventsError::TokenMismatch); - - // Not checking the token works. 
- timeline - .inner - .handle_back_paginated_events( - vec![], - PaginationTokens { - from: Some("a".to_owned()), - check_from: false, - to: Some("b".to_owned()), - }, - ) - .await - .unwrap(); -} diff --git a/crates/matrix-sdk-ui/src/timeline/tests/reactions.rs b/crates/matrix-sdk-ui/src/timeline/tests/reactions.rs index 59792d84bd4..bd9a5207cc2 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/reactions.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/reactions.rs @@ -37,7 +37,7 @@ use crate::timeline::{ const REACTION_KEY: &str = "👍"; #[async_test] -async fn add_reaction_failed() { +async fn test_add_reaction_failed() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; let (msg_id, msg_pos) = send_first_message(&timeline, &mut stream).await; @@ -57,7 +57,7 @@ async fn add_reaction_failed() { } #[async_test] -async fn add_reaction_on_non_existent_event() { +async fn test_add_reaction_on_non_existent_event() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; let msg_id = EventId::new(server_name!("example.org")); // non existent event @@ -69,7 +69,7 @@ async fn add_reaction_on_non_existent_event() { } #[async_test] -async fn add_reaction_success() { +async fn test_add_reaction_success() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; let (msg_id, msg_pos) = send_first_message(&timeline, &mut stream).await; @@ -93,7 +93,7 @@ async fn add_reaction_success() { } #[async_test] -async fn redact_reaction_success() { +async fn test_redact_reaction_success() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; let (msg_id, msg_pos) = send_first_message(&timeline, &mut stream).await; @@ -115,7 +115,7 @@ async fn redact_reaction_success() { } #[async_test] -async fn redact_reaction_failure() { +async fn test_redact_reaction_failure() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; let (msg_id, msg_pos) = send_first_message(&timeline, &mut stream).await; @@ -141,7 +141,7 @@ async fn redact_reaction_failure() { } #[async_test] -async fn redact_reaction_from_non_existent_event() { +async fn test_redact_reaction_from_non_existent_event() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; let reaction_id = EventId::new(server_name!("example.org")); // non existent event @@ -154,7 +154,7 @@ async fn redact_reaction_from_non_existent_event() { } #[async_test] -async fn toggle_during_request_resolves_new_action() { +async fn test_toggle_during_request_resolves_new_action() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; let (msg_id, msg_pos) = send_first_message(&timeline, &mut stream).await; @@ -205,7 +205,7 @@ async fn toggle_during_request_resolves_new_action() { } #[async_test] -async fn reactions_store_timestamp() { +async fn test_reactions_store_timestamp() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe().await; let (msg_id, msg_pos) = send_first_message(&timeline, &mut stream).await; @@ -238,15 +238,15 @@ async fn reactions_store_timestamp() { } #[async_test] -async fn initial_reaction_timestamp_is_stored() { - let mut timeline = TestTimeline::new(); +async fn test_initial_reaction_timestamp_is_stored() { + let timeline = TestTimeline::new(); let message_event_id = EventId::new(server_name!("dummy.server")); let reaction_timestamp = MilliSecondsSinceUnixEpoch(uint!(39845)); timeline .inner - .add_initial_events( + .add_events_at( vec![ 
SyncTimelineEvent::new(timeline.event_builder.make_sync_reaction( *ALICE, @@ -259,7 +259,7 @@ async fn initial_reaction_timestamp_is_stored() { RoomMessageEventContent::text_plain("A"), )), ], - None, + crate::timeline::inner::TimelineEnd::Back { from_cache: false }, ) .await; diff --git a/crates/matrix-sdk-ui/src/timeline/tests/read_receipts.rs b/crates/matrix-sdk-ui/src/timeline/tests/read_receipts.rs index 822d35fbe87..776b8e61ff0 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/read_receipts.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/read_receipts.rs @@ -100,7 +100,7 @@ async fn test_read_receipts_updates_on_live_events() { } #[async_test] -async fn read_receipts_updates_on_back_paginated_events() { +async fn test_read_receipts_updates_on_back_paginated_events() { let timeline = TestTimeline::new() .with_settings(TimelineInnerSettings { track_read_receipts: true, ..Default::default() }); let room_id = room_id!("!room:localhost"); @@ -223,7 +223,7 @@ async fn test_read_receipts_updates_on_filtered_events() { } #[async_test] -async fn read_receipts_updates_on_filtered_events_with_stored() { +async fn test_read_receipts_updates_on_filtered_events_with_stored() { let timeline = TestTimeline::new().with_settings(TimelineInnerSettings { track_read_receipts: true, event_filter: Arc::new(filter_notice), @@ -276,7 +276,7 @@ async fn read_receipts_updates_on_filtered_events_with_stored() { } #[async_test] -async fn read_receipts_updates_on_back_paginated_filtered_events() { +async fn test_read_receipts_updates_on_back_paginated_filtered_events() { let timeline = TestTimeline::new().with_settings(TimelineInnerSettings { track_read_receipts: true, event_filter: Arc::new(filter_notice), @@ -331,7 +331,7 @@ async fn read_receipts_updates_on_back_paginated_filtered_events() { #[cfg(feature = "e2e-encryption")] #[async_test] -async fn read_receipts_updates_on_message_decryption() { +async fn test_read_receipts_updates_on_message_decryption() { use std::{io::Cursor, iter}; use assert_matches::assert_matches; @@ -466,7 +466,7 @@ async fn read_receipts_updates_on_message_decryption() { } #[async_test] -async fn initial_public_unthreaded_receipt() { +async fn test_initial_public_unthreaded_receipt() { let event_id = owned_event_id!("$event_with_receipt"); // Add initial unthreaded public receipt. @@ -491,7 +491,7 @@ async fn initial_public_unthreaded_receipt() { } #[async_test] -async fn initial_public_main_thread_receipt() { +async fn test_initial_public_main_thread_receipt() { let event_id = owned_event_id!("$event_with_receipt"); // Add initial public receipt on the main thread. @@ -516,7 +516,7 @@ async fn initial_public_main_thread_receipt() { } #[async_test] -async fn initial_private_unthreaded_receipt() { +async fn test_initial_private_unthreaded_receipt() { let event_id = owned_event_id!("$event_with_receipt"); // Add initial unthreaded private receipt. @@ -541,7 +541,7 @@ async fn initial_private_unthreaded_receipt() { } #[async_test] -async fn initial_private_main_thread_receipt() { +async fn test_initial_private_main_thread_receipt() { let event_id = owned_event_id!("$event_with_receipt"); // Add initial private receipt on the main thread. 
@@ -566,7 +566,7 @@ async fn initial_private_main_thread_receipt() { } #[async_test] -async fn clear_read_receipts() { +async fn test_clear_read_receipts() { let room_id = room_id!("!room:localhost"); let event_a_id = event_id!("$event_a"); let event_b_id = event_id!("$event_b"); diff --git a/crates/matrix-sdk-ui/src/timeline/tests/redaction.rs b/crates/matrix-sdk-ui/src/timeline/tests/redaction.rs index a34d5cd045d..b876f05e06f 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/redaction.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/redaction.rs @@ -38,7 +38,7 @@ use super::TestTimeline; use crate::timeline::{AnyOtherFullStateEventContent, TimelineDetails, TimelineItemContent}; #[async_test] -async fn redact_state_event() { +async fn test_redact_state_event() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe_events().await; @@ -68,7 +68,7 @@ async fn redact_state_event() { } #[async_test] -async fn redact_replied_to_event() { +async fn test_redact_replied_to_event() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe_events().await; @@ -114,7 +114,7 @@ async fn redact_replied_to_event() { } #[async_test] -async fn reaction_redaction() { +async fn test_reaction_redaction() { let timeline = TestTimeline::new(); let mut stream = timeline.subscribe_events().await; @@ -139,20 +139,20 @@ async fn reaction_redaction() { } #[async_test] -async fn reaction_redaction_timeline_filter() { - let mut timeline = TestTimeline::new(); +async fn test_reaction_redaction_timeline_filter() { + let timeline = TestTimeline::new(); let mut stream = timeline.subscribe_events().await; // Initialise a timeline with a redacted reaction. timeline .inner - .add_initial_events( + .add_events_at( vec![SyncTimelineEvent::new( timeline .event_builder .make_sync_redacted_message_event(*ALICE, RedactedReactionEventContent::new()), )], - None, + crate::timeline::inner::TimelineEnd::Back { from_cache: false }, ) .await; // Timeline items are actually empty. @@ -183,7 +183,7 @@ async fn reaction_redaction_timeline_filter() { } #[async_test] -async fn receive_unredacted() { +async fn test_receive_unredacted() { let timeline = TestTimeline::new(); // send two events, second one redacted diff --git a/crates/matrix-sdk-ui/src/timeline/traits.rs b/crates/matrix-sdk-ui/src/timeline/traits.rs index 8cf17680109..81275734090 100644 --- a/crates/matrix-sdk-ui/src/timeline/traits.rs +++ b/crates/matrix-sdk-ui/src/timeline/traits.rs @@ -14,9 +14,9 @@ use async_trait::async_trait; use indexmap::IndexMap; -use matrix_sdk::Room; #[cfg(feature = "e2e-encryption")] use matrix_sdk::{deserialized_responses::TimelineEvent, Result}; +use matrix_sdk::{event_cache, Room}; use matrix_sdk_base::latest_event::LatestEvent; use ruma::{ events::receipt::{Receipt, ReceiptThread, ReceiptType}, @@ -39,7 +39,7 @@ pub trait RoomExt { /// independent events. /// /// This is the same as using `room.timeline_builder().build()`. - async fn timeline(&self) -> crate::event_cache::Result; + async fn timeline(&self) -> event_cache::Result; /// Get a [`TimelineBuilder`] for this room. 
     ///
@@ -54,7 +54,7 @@
 #[async_trait]
 impl RoomExt for Room {
-    async fn timeline(&self) -> crate::event_cache::Result<Timeline> {
+    async fn timeline(&self) -> event_cache::Result<Timeline> {
         self.timeline_builder().build().await
     }
 
diff --git a/crates/matrix-sdk-ui/src/unable_to_decrypt_hook.rs b/crates/matrix-sdk-ui/src/unable_to_decrypt_hook.rs
new file mode 100644
index 00000000000..98adbf89a66
--- /dev/null
+++ b/crates/matrix-sdk-ui/src/unable_to_decrypt_hook.rs
@@ -0,0 +1,373 @@
+// Copyright 2024 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module provides a generic interface to subscribe to unable-to-decrypt
+//! events, and notable updates to such events.
+//!
+//! This provides a general trait that a consumer may implement, as well as
+//! utilities to simplify usage of this trait.
+
+use std::{
+    collections::HashMap,
+    sync::{Arc, Mutex},
+    time::{Duration, Instant},
+};
+
+use ruma::{EventId, OwnedEventId};
+use tokio::{spawn, task::JoinHandle, time::sleep};
+
+/// A generic interface whose methods are called whenever we observe an
+/// unable-to-decrypt (UTD) event.
+pub trait UnableToDecryptHook: std::fmt::Debug + Send + Sync {
+    /// Called every time the hook observes an encrypted event that couldn't be
+    /// decrypted.
+    ///
+    /// If the hook manager was configured with a max delay, this could also
+    /// contain extra information for late-decrypted events. See details in
+    /// [`UnableToDecryptInfo::time_to_decrypt`].
+    fn on_utd(&self, info: UnableToDecryptInfo);
+}
+
+/// Information about an event we were unable to decrypt (UTD).
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct UnableToDecryptInfo {
+    /// The identifier of the event that couldn't get decrypted.
+    pub event_id: OwnedEventId,
+
+    /// If the event could be decrypted late (that is, the event was encrypted
+    /// at first, but could be decrypted later on), then this indicates the
+    /// time it took to decrypt the event. If it is not set, this is
+    /// considered a definite UTD.
+    pub time_to_decrypt: Option<Duration>,
+}
+
+type PendingUtdReports = Vec<(OwnedEventId, JoinHandle<()>)>;
+
+/// A manager over an existing [`UnableToDecryptHook`] that deduplicates UTDs
+/// on similar events, and adds basic consistency checks.
+///
+/// It can also apply a grace period before reporting an event as a UTD, if
+/// configured with [`Self::with_max_delay`]. Instead of immediately reporting
+/// the UTD, the reporting will be delayed by the max delay at most; if the
+/// event could eventually get decrypted, it may be reported before the end of
+/// that delay.
+#[derive(Debug)]
+pub struct UtdHookManager {
+    /// The parent hook we'll call, when we have found a unique UTD.
+    parent: Arc<dyn UnableToDecryptHook>,
+
+    /// A mapping of events we've marked as UTDs, and the time at which we
+    /// observed those UTDs.
+    ///
+    /// Note: this is unbounded, because we have absolutely no idea how long it
+    /// will take for a UTD to resolve, or if it will even resolve at any
+    /// point.
+    known_utds: Arc<Mutex<HashMap<OwnedEventId, Instant>>>,
+
+    /// An optional delay before marking the event as UTD ("grace period").
+    max_delay: Option<Duration>,
+
+    /// The set of outstanding tasks to report deferred UTDs, including the
+    /// event relating to the task.
+    ///
+    /// Note: this is empty if no [`Self::max_delay`] is set.
+    ///
+    /// Note: this is theoretically unbounded in size, although this set of
+    /// tasks will shrink over time, as tasks expire after the max delay.
+    pending_delayed: Arc<Mutex<PendingUtdReports>>,
+}
+
+impl UtdHookManager {
+    /// Create a new [`UtdHookManager`] for the given hook.
+    pub fn new(parent: Arc<dyn UnableToDecryptHook>) -> Self {
+        Self {
+            parent,
+            known_utds: Default::default(),
+            max_delay: None,
+            pending_delayed: Default::default(),
+        }
+    }
+
+    /// Reports UTDs with the given max delay.
+    ///
+    /// Note: late decryptions are always reported, even if there was a grace
+    /// period set for the reporting of the UTD.
+    pub fn with_max_delay(mut self, delay: Duration) -> Self {
+        self.max_delay = Some(delay);
+        self
+    }
+
+    /// The function to call whenever a UTD is seen for the first time.
+    ///
+    /// Pipe in any information that needs to be included in the final report.
+    pub(crate) fn on_utd(&self, event_id: &EventId) {
+        // Only let the parent hook know if the event wasn't already handled.
+        {
+            let mut known_utds = self.known_utds.lock().unwrap();
+            // Note: we don't want to replace the previous time, so don't look at the result
+            // of insert to know whether the entry was already present or not.
+            if known_utds.contains_key(event_id) {
+                return;
+            }
+            known_utds.insert(event_id.to_owned(), Instant::now());
+        }
+
+        let info = UnableToDecryptInfo { event_id: event_id.to_owned(), time_to_decrypt: None };
+
+        let Some(max_delay) = self.max_delay else {
+            // No delay: immediately report the event to the parent hook.
+            self.parent.on_utd(info);
+            return;
+        };
+
+        let event_id = info.event_id.clone();
+
+        // Clone Arc'd pointers shared with the task below.
+        let known_utds = self.known_utds.clone();
+        let pending_delayed = self.pending_delayed.clone();
+        let parent = self.parent.clone();
+
+        // Spawn a task that will wait for the given delay, and maybe call the parent
+        // hook then.
+        let handle = spawn(async move {
+            // Wait for the given delay.
+            sleep(max_delay).await;
+
+            // In any case, remove the task from the outstanding set.
+            pending_delayed.lock().unwrap().retain(|(event_id, _)| *event_id != info.event_id);
+
+            // Check if the event is still in the map: if not, it's been decrypted since
+            // then!
+            if known_utds.lock().unwrap().contains_key(&info.event_id) {
+                parent.on_utd(info);
+            }
+        });
+
+        // Add the task to the set of pending tasks.
+        self.pending_delayed.lock().unwrap().push((event_id, handle));
+    }
+
+    /// The function to call whenever an event that was marked as a UTD has
+    /// eventually been decrypted.
+    ///
+    /// Note: if this is called for an event that was never marked as a UTD
+    /// before, it has no effect.
+    pub(crate) fn on_late_decrypt(&self, event_id: &EventId) {
+        // Only let the parent hook know if the event was known to be a UTD.
+        let Some(marked_utd_at) = self.known_utds.lock().unwrap().remove(event_id) else {
+            return;
+        };
+
+        let info = UnableToDecryptInfo {
+            event_id: event_id.to_owned(),
+            time_to_decrypt: Some(marked_utd_at.elapsed()),
+        };
+
+        // Cancel and remove the task from the outstanding set immediately.
+ self.pending_delayed.lock().unwrap().retain(|(event_id, task)| { + if *event_id == info.event_id { + task.abort(); + false + } else { + true + } + }); + + // Report to the parent hook. + self.parent.on_utd(info); + } +} + +impl Drop for UtdHookManager { + fn drop(&mut self) { + // Cancel all the outstanding delayed tasks to report UTDs. + let mut pending_delayed = self.pending_delayed.lock().unwrap(); + for (_, task) in pending_delayed.drain(..) { + task.abort(); + } + } +} + +#[cfg(test)] +mod tests { + use matrix_sdk_test::async_test; + use ruma::event_id; + + use super::*; + + #[derive(Debug, Default)] + struct Dummy { + utds: Mutex>, + } + + impl UnableToDecryptHook for Dummy { + fn on_utd(&self, info: UnableToDecryptInfo) { + self.utds.lock().unwrap().push(info); + } + } + + #[test] + fn test_deduplicates_utds() { + // If I create a dummy hook, + let hook = Arc::new(Dummy::default()); + + // And I wrap with the UtdHookManager, + let wrapper = UtdHookManager::new(hook.clone()); + + // And I call the `on_utd` method multiple times, sometimes on the same event, + wrapper.on_utd(event_id!("$1")); + wrapper.on_utd(event_id!("$1")); + wrapper.on_utd(event_id!("$2")); + wrapper.on_utd(event_id!("$1")); + wrapper.on_utd(event_id!("$2")); + wrapper.on_utd(event_id!("$3")); + + // Then the event ids have been deduplicated, + { + let utds = hook.utds.lock().unwrap(); + assert_eq!(utds.len(), 3); + assert_eq!(utds[0].event_id, event_id!("$1")); + assert_eq!(utds[1].event_id, event_id!("$2")); + assert_eq!(utds[2].event_id, event_id!("$3")); + + // No event is a late-decryption event. + assert!(utds[0].time_to_decrypt.is_none()); + assert!(utds[1].time_to_decrypt.is_none()); + assert!(utds[2].time_to_decrypt.is_none()); + } + } + + #[test] + fn test_on_late_decrypted_no_effect() { + // If I create a dummy hook, + let hook = Arc::new(Dummy::default()); + + // And I wrap with the UtdHookManager, + let wrapper = UtdHookManager::new(hook.clone()); + + // And I call the `on_late_decrypt` method before the event had been marked as + // utd, + wrapper.on_late_decrypt(event_id!("$1")); + + // Then nothing is registered in the parent hook. + assert!(hook.utds.lock().unwrap().is_empty()); + } + + #[test] + fn test_on_late_decrypted_after_utd_no_grace_period() { + // If I create a dummy hook, + let hook = Arc::new(Dummy::default()); + + // And I wrap with the UtdHookManager, + let wrapper = UtdHookManager::new(hook.clone()); + + // And I call the `on_utd` method for an event, + wrapper.on_utd(event_id!("$1")); + + // Then the UTD has been notified, but not as late-decrypted event. + { + let utds = hook.utds.lock().unwrap(); + assert_eq!(utds.len(), 1); + assert_eq!(utds[0].event_id, event_id!("$1")); + assert!(utds[0].time_to_decrypt.is_none()); + } + + // And when I call the `on_late_decrypt` method, + wrapper.on_late_decrypt(event_id!("$1")); + + // Then the event is now reported as a late-decryption too. + { + let utds = hook.utds.lock().unwrap(); + assert_eq!(utds.len(), 2); + + // The previous report is still there. (There was no grace period.) + assert_eq!(utds[0].event_id, event_id!("$1")); + assert!(utds[0].time_to_decrypt.is_none()); + + // The new report with a late-decryption is there. 
+ assert_eq!(utds[1].event_id, event_id!("$1")); + assert!(utds[1].time_to_decrypt.is_some()); + } + } + + #[cfg(not(target_arch = "wasm32"))] // wasm32 has no time for that + #[async_test] + async fn test_delayed_utd() { + // If I create a dummy hook, + let hook = Arc::new(Dummy::default()); + + // And I wrap with the UtdHookManager, configured to delay reporting after 2 + // seconds. + let wrapper = UtdHookManager::new(hook.clone()).with_max_delay(Duration::from_secs(2)); + + // And I call the `on_utd` method for an event, + wrapper.on_utd(event_id!("$1")); + + // Then the UTD is not being reported immediately. + assert!(hook.utds.lock().unwrap().is_empty()); + assert_eq!(wrapper.pending_delayed.lock().unwrap().len(), 1); + + // If I wait for 1 second, then it's still not been notified yet. + sleep(Duration::from_secs(1)).await; + + assert!(hook.utds.lock().unwrap().is_empty()); + assert_eq!(wrapper.pending_delayed.lock().unwrap().len(), 1); + + // But if I wait just a bit more, then it's getting notified as a definite UTD. + sleep(Duration::from_millis(1500)).await; + + { + let utds = hook.utds.lock().unwrap(); + assert_eq!(utds.len(), 1); + assert_eq!(utds[0].event_id, event_id!("$1")); + assert!(utds[0].time_to_decrypt.is_none()); + } + + assert!(wrapper.pending_delayed.lock().unwrap().is_empty()); + } + + #[cfg(not(target_arch = "wasm32"))] // wasm32 has no time for that + #[async_test] + async fn test_delayed_late_decryption() { + // If I create a dummy hook, + let hook = Arc::new(Dummy::default()); + + // And I wrap with the UtdHookManager, configured to delay reporting after 2 + // seconds. + let wrapper = UtdHookManager::new(hook.clone()).with_max_delay(Duration::from_secs(2)); + + // And I call the `on_utd` method for an event, + wrapper.on_utd(event_id!("$1")); + + // Then the UTD has not been notified quite yet. + assert!(hook.utds.lock().unwrap().is_empty()); + assert_eq!(wrapper.pending_delayed.lock().unwrap().len(), 1); + + // If I wait for 1 second, and mark the event as late-decrypted, + sleep(Duration::from_secs(1)).await; + + wrapper.on_late_decrypt(event_id!("$1")); + + // Then it's being immediately reported as a late-decryption UTD. + { + let utds = hook.utds.lock().unwrap(); + assert_eq!(utds.len(), 1); + assert_eq!(utds[0].event_id, event_id!("$1")); + assert!(utds[0].time_to_decrypt.is_some()); + } + + // And there aren't any pending delayed reports anymore. 
+ assert!(wrapper.pending_delayed.lock().unwrap().is_empty()); + } +} diff --git a/crates/matrix-sdk-ui/tests/integration/encryption_sync_service.rs b/crates/matrix-sdk-ui/tests/integration/encryption_sync_service.rs index 75c80ff43af..a75b835921f 100644 --- a/crates/matrix-sdk-ui/tests/integration/encryption_sync_service.rs +++ b/crates/matrix-sdk-ui/tests/integration/encryption_sync_service.rs @@ -1,6 +1,7 @@ use std::sync::{Arc, Mutex}; use futures_util::{pin_mut, StreamExt as _}; +use matrix_sdk::test_utils::logged_in_client_with_server; use matrix_sdk_test::async_test; use matrix_sdk_ui::encryption_sync_service::{ EncryptionSyncPermit, EncryptionSyncService, WithLocking, @@ -10,14 +11,13 @@ use tokio::sync::Mutex as AsyncMutex; use wiremock::{Mock, MockGuard, MockServer, Request, ResponseTemplate}; use crate::{ - logged_in_client, sliding_sync::{check_requests, PartialSlidingSyncRequest, SlidingSyncMatcher}, sliding_sync_then_assert_request_and_fake_response, }; #[async_test] async fn test_smoke_encryption_sync_works() -> anyhow::Result<()> { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_permit = Arc::new(AsyncMutex::new(EncryptionSyncPermit::new_for_testing())); let sync_permit_guard = sync_permit.clone().lock_owned().await; @@ -161,7 +161,7 @@ async fn setup_mocking_sliding_sync_server(server: &MockServer) -> MockGuard { #[async_test] async fn test_encryption_sync_one_fixed_iteration() -> anyhow::Result<()> { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let _guard = setup_mocking_sliding_sync_server(&server).await; @@ -193,7 +193,7 @@ async fn test_encryption_sync_one_fixed_iteration() -> anyhow::Result<()> { #[async_test] async fn test_encryption_sync_two_fixed_iterations() -> anyhow::Result<()> { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let _guard = setup_mocking_sliding_sync_server(&server).await; @@ -230,7 +230,7 @@ async fn test_encryption_sync_two_fixed_iterations() -> anyhow::Result<()> { #[async_test] async fn test_encryption_sync_always_reloads_todevice_token() -> anyhow::Result<()> { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_permit = Arc::new(AsyncMutex::new(EncryptionSyncPermit::new_for_testing())); let sync_permit_guard = sync_permit.lock_owned().await; diff --git a/crates/matrix-sdk-ui/tests/integration/main.rs b/crates/matrix-sdk-ui/tests/integration/main.rs index 64bb6b9dbe5..476570138c8 100644 --- a/crates/matrix-sdk-ui/tests/integration/main.rs +++ b/crates/matrix-sdk-ui/tests/integration/main.rs @@ -12,14 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
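// Illustrative sketch (not part of the patch): a consumer of the new
// `unable_to_decrypt_hook` module introduced above only has to implement
// `UnableToDecryptHook` and wrap it in a `UtdHookManager`; the timeline machinery then
// drives the crate-internal `on_utd`/`on_late_decrypt` calls, as the unit tests above do.
// A minimal sketch using only items from this diff (the `MetricsHook` name and the
// `matrix_sdk_ui::unable_to_decrypt_hook` import path are assumptions for illustration):

use std::{
    sync::{Arc, Mutex},
    time::Duration,
};

use matrix_sdk_ui::unable_to_decrypt_hook::{
    UnableToDecryptHook, UnableToDecryptInfo, UtdHookManager,
};

#[derive(Debug, Default)]
struct MetricsHook {
    // Collected reports; a real hook might forward these to an analytics backend.
    reports: Mutex<Vec<UnableToDecryptInfo>>,
}

impl UnableToDecryptHook for MetricsHook {
    fn on_utd(&self, info: UnableToDecryptInfo) {
        match info.time_to_decrypt {
            Some(elapsed) => println!("{} decrypted late, after {elapsed:?}", info.event_id),
            None => println!("{} is a definite UTD", info.event_id),
        }
        self.reports.lock().unwrap().push(info);
    }
}

fn build_utd_manager() -> UtdHookManager {
    let hook = Arc::new(MetricsHook::default());
    // Grace period: an event is only reported as a UTD if it is still undecrypted after
    // five seconds; late decryptions are always reported.
    UtdHookManager::new(hook).with_max_delay(Duration::from_secs(5))
}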
-use matrix_sdk::{ - config::RequestConfig, - matrix_auth::{MatrixSession, MatrixSessionTokens}, - Client, ClientBuilder, -}; -use matrix_sdk_base::SessionMeta; use matrix_sdk_test::test_json; -use ruma::{api::MatrixVersion, device_id, user_id}; use serde::Serialize; use wiremock::{ matchers::{header, method, path, path_regex, query_param, query_param_is_missing}, @@ -35,34 +28,6 @@ mod timeline; matrix_sdk_test::init_tracing_for_tests!(); -async fn test_client_builder() -> (ClientBuilder, MockServer) { - let server = MockServer::start().await; - let builder = - Client::builder().homeserver_url(server.uri()).server_versions([MatrixVersion::V1_0]); - (builder, server) -} - -async fn no_retry_test_client() -> (Client, MockServer) { - let (builder, server) = test_client_builder().await; - let client = - builder.request_config(RequestConfig::new().disable_retry()).build().await.unwrap(); - (client, server) -} - -async fn logged_in_client() -> (Client, MockServer) { - let session = MatrixSession { - meta: SessionMeta { - user_id: user_id!("@example:localhost").to_owned(), - device_id: device_id!("DEVICEID").to_owned(), - }, - tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, - }; - let (client, server) = no_retry_test_client().await; - client.restore_session(session).await.unwrap(); - - (client, server) -} - /// Mount a Mock on the given server to handle the `GET /sync` endpoint with /// an optional `since` param that returns a 200 status code with the given /// response body. diff --git a/crates/matrix-sdk-ui/tests/integration/notification_client.rs b/crates/matrix-sdk-ui/tests/integration/notification_client.rs index bf74992af52..071a33b8d16 100644 --- a/crates/matrix-sdk-ui/tests/integration/notification_client.rs +++ b/crates/matrix-sdk-ui/tests/integration/notification_client.rs @@ -4,7 +4,7 @@ use std::{ }; use assert_matches::assert_matches; -use matrix_sdk::config::SyncSettings; +use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server}; use matrix_sdk_test::{async_test, sync_timeline_event, JoinedRoomBuilder, SyncResponseBuilder}; use matrix_sdk_ui::{ notification_client::{ @@ -20,14 +20,14 @@ use wiremock::{ }; use crate::{ - logged_in_client, mock_encryption_state, mock_sync, + mock_encryption_state, mock_sync, sliding_sync::{check_requests, PartialSlidingSyncRequest, SlidingSyncMatcher}, }; #[async_test] async fn test_notification_client_with_context() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); @@ -113,7 +113,7 @@ async fn test_notification_client_with_context() { #[async_test] async fn test_notification_client_sliding_sync() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_id = event_id!("$example_event_id"); let sender = user_id!("@user:example.org"); diff --git a/crates/matrix-sdk-ui/tests/integration/room_list_service.rs b/crates/matrix-sdk-ui/tests/integration/room_list_service.rs index f8c89ff7dbe..f38be86a14e 100644 --- a/crates/matrix-sdk-ui/tests/integration/room_list_service.rs +++ b/crates/matrix-sdk-ui/tests/integration/room_list_service.rs @@ -7,7 +7,7 @@ use assert_matches::assert_matches; use eyeball_im::VectorDiff; use futures_util::{pin_mut, FutureExt, StreamExt}; use imbl::vector; 
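The per-crate `logged_in_client()` helper removed above now lives in `matrix_sdk::test_utils` as `logged_in_client_with_server()`. The sketch below simply reassembles the deleted helper to show what the shared utility is expected to do (mock server, retry-free client, restored static session); the actual body of the shared function may differ slightly.

```rust
use matrix_sdk::{
    config::RequestConfig,
    matrix_auth::{MatrixSession, MatrixSessionTokens},
    Client,
};
use matrix_sdk_base::SessionMeta;
use ruma::{api::MatrixVersion, device_id, user_id};
use wiremock::MockServer;

// Sketch of the shared test helper, reassembled from the helper deleted
// above: start a mock homeserver, build a client against it with retries
// disabled, and restore a canned session so the client counts as logged in.
async fn logged_in_client_with_server_sketch() -> (Client, MockServer) {
    let server = MockServer::start().await;
    let client = Client::builder()
        .homeserver_url(server.uri())
        .server_versions([MatrixVersion::V1_0])
        .request_config(RequestConfig::new().disable_retry())
        .build()
        .await
        .unwrap();

    let session = MatrixSession {
        meta: SessionMeta {
            user_id: user_id!("@example:localhost").to_owned(),
            device_id: device_id!("DEVICEID").to_owned(),
        },
        tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None },
    };
    client.restore_session(session).await.unwrap();

    (client, server)
}
```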
-use matrix_sdk::Client; +use matrix_sdk::{test_utils::logged_in_client_with_server, Client}; use matrix_sdk_base::sync::UnreadNotificationsCount; use matrix_sdk_test::async_test; use matrix_sdk_ui::{ @@ -31,13 +31,10 @@ use stream_assert::{assert_next_matches, assert_pending}; use tokio::{spawn, sync::mpsc::channel, task::yield_now}; use wiremock::MockServer; -use crate::{ - logged_in_client, - timeline::sliding_sync::{assert_timeline_stream, timeline_event}, -}; +use crate::timeline::sliding_sync::{assert_timeline_stream, timeline_event}; async fn new_room_list_service() -> Result<(Client, MockServer, RoomListService), Error> { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let room_list = RoomListService::new(client.clone()).await?; Ok((client, server, room_list)) @@ -2568,7 +2565,7 @@ async fn test_room_timeline() -> Result<(), Error> { }; let room = room_list.room(room_id).await?; - room.init_timeline_with_builder(room.default_room_timeline_builder().await).await?; + room.init_timeline_with_builder(room.default_room_timeline_builder().await.unwrap()).await?; let timeline = room.timeline().unwrap(); let (previous_timeline_items, mut timeline_items_stream) = timeline.subscribe().await; @@ -2650,7 +2647,7 @@ async fn test_room_latest_event() -> Result<(), Error> { }; let room = room_list.room(room_id).await?; - room.init_timeline_with_builder(room.default_room_timeline_builder().await).await?; + room.init_timeline_with_builder(room.default_room_timeline_builder().await.unwrap()).await?; // The latest event does not exist. assert!(room.latest_event().await.is_none()); diff --git a/crates/matrix-sdk-ui/tests/integration/sync_service.rs b/crates/matrix-sdk-ui/tests/integration/sync_service.rs index b901edb2fed..dae23a4261f 100644 --- a/crates/matrix-sdk-ui/tests/integration/sync_service.rs +++ b/crates/matrix-sdk-ui/tests/integration/sync_service.rs @@ -17,16 +17,14 @@ use std::{ time::Duration, }; +use matrix_sdk::test_utils::logged_in_client_with_server; use matrix_sdk_test::async_test; use matrix_sdk_ui::sync_service::{State, SyncService}; use serde_json::json; use stream_assert::{assert_next_matches, assert_pending}; use wiremock::{Match as _, Mock, MockGuard, MockServer, Request, ResponseTemplate}; -use crate::{ - logged_in_client, - sliding_sync::{PartialSlidingSyncRequest, SlidingSyncMatcher}, -}; +use crate::sliding_sync::{PartialSlidingSyncRequest, SlidingSyncMatcher}; /// Sets up a sliding sync server that use different `pos` values for the /// encrptyion and the room sync. 
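For readers unfamiliar with the mocking setup used by these helpers: `setup_mocking_sliding_sync_server` returns a wiremock `MockGuard`, i.e. a scoped mock that is unmounted when the guard is dropped. A minimal sketch of that pattern follows; the endpoint path and response body are illustrative, not the SDK's actual sliding sync wire format.

```rust
use serde_json::json;
use wiremock::{
    matchers::{method, path},
    Mock, MockGuard, MockServer, ResponseTemplate,
};

// Mount a scoped mock: it only answers while the returned guard is alive,
// and `expect(1)` turns a later `server.verify().await` into an assertion
// that exactly one request reached the endpoint.
async fn mount_scoped_sync_mock(server: &MockServer) -> MockGuard {
    Mock::given(method("POST"))
        .and(path("/_matrix/client/unstable/org.matrix.msc3575/sync"))
        .respond_with(ResponseTemplate::new(200).set_body_json(json!({ "pos": "0", "lists": {} })))
        .expect(1)
        .mount_as_scoped(server)
        .await
}
```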
@@ -61,7 +59,7 @@ async fn setup_mocking_sliding_sync_server( #[async_test] async fn test_sync_service_state() -> anyhow::Result<()> { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let encryption_pos = Arc::new(Mutex::new(0)); let room_pos = Arc::new(Mutex::new(0)); diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/echo.rs b/crates/matrix-sdk-ui/tests/integration/timeline/echo.rs index 06cd9842d8e..ca1f8ee214b 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/echo.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/echo.rs @@ -18,7 +18,10 @@ use assert_matches::assert_matches; use assert_matches2::assert_let; use eyeball_im::VectorDiff; use futures_util::StreamExt; -use matrix_sdk::{config::SyncSettings, executor::spawn, ruma::MilliSecondsSinceUnixEpoch}; +use matrix_sdk::{ + config::SyncSettings, executor::spawn, ruma::MilliSecondsSinceUnixEpoch, + test_utils::logged_in_client_with_server, +}; use matrix_sdk_test::{async_test, sync_timeline_event, JoinedRoomBuilder, SyncResponseBuilder}; use matrix_sdk_ui::timeline::{ EventSendState, RoomExt, TimelineItemContent, TimelineItemKind, VirtualTimelineItem, @@ -35,12 +38,12 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_encryption_state, mock_sync}; +use crate::{mock_encryption_state, mock_sync}; #[async_test] async fn test_echo() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -130,7 +133,7 @@ async fn test_echo() { #[async_test] async fn test_retry_failed() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -186,7 +189,7 @@ async fn test_retry_failed() { #[async_test] async fn test_dedup_by_event_id_late() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -255,7 +258,7 @@ async fn test_dedup_by_event_id_late() { #[async_test] async fn test_cancel_failed() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/edit.rs b/crates/matrix-sdk-ui/tests/integration/timeline/edit.rs index 2611ec688c2..8c8e581ea19 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/edit.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/edit.rs @@ -18,7 +18,7 @@ use assert_matches::assert_matches; use assert_matches2::assert_let; use eyeball_im::VectorDiff; use futures_util::StreamExt; -use matrix_sdk::config::SyncSettings; +use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server}; use matrix_sdk_test::{ async_test, EventBuilder, JoinedRoomBuilder, SyncResponseBuilder, ALICE, BOB, 
}; @@ -46,12 +46,12 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_encryption_state, mock_sync}; +use crate::{mock_encryption_state, mock_sync}; #[async_test] async fn test_edit() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_builder = EventBuilder::new(); let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); @@ -156,7 +156,7 @@ async fn test_edit() { #[async_test] async fn test_send_edit() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_builder = EventBuilder::new(); let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); @@ -227,7 +227,7 @@ async fn test_send_edit() { #[async_test] async fn test_send_reply_edit() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_builder = EventBuilder::new(); let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); @@ -314,7 +314,7 @@ async fn test_send_reply_edit() { #[async_test] async fn test_send_edit_poll() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_builder = EventBuilder::new(); let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs b/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs index 231bf0cb6a8..886f4ee547d 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs @@ -18,7 +18,7 @@ use assert_matches::assert_matches; use assert_matches2::assert_let; use eyeball_im::VectorDiff; use futures_util::StreamExt; -use matrix_sdk::config::SyncSettings; +use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server}; use matrix_sdk_test::{ async_test, sync_timeline_event, JoinedRoomBuilder, RoomAccountDataTestEvent, StateTestEvent, SyncResponseBuilder, @@ -26,7 +26,7 @@ use matrix_sdk_test::{ use matrix_sdk_ui::timeline::{RoomExt, TimelineItemContent, VirtualTimelineItem}; use ruma::{room_id, user_id}; -use crate::{logged_in_client, mock_sync}; +use crate::mock_sync; mod echo; mod edit; @@ -42,7 +42,7 @@ pub(crate) mod sliding_sync; #[async_test] async fn test_reaction() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -151,7 +151,7 @@ async fn test_reaction() { #[async_test] async fn test_redacted_message() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -209,7 +209,7 @@ async fn test_redacted_message() { #[async_test] async fn test_read_marker() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) 
= logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -284,7 +284,7 @@ async fn test_read_marker() { #[async_test] async fn test_sync_highlighted() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/pagination.rs b/crates/matrix-sdk-ui/tests/integration/timeline/pagination.rs index 425e7470822..6fd8778e121 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/pagination.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/pagination.rs @@ -18,7 +18,7 @@ use assert_matches::assert_matches; use assert_matches2::assert_let; use eyeball_im::VectorDiff; use futures_util::future::{join, join3}; -use matrix_sdk::config::SyncSettings; +use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server}; use matrix_sdk_test::{ async_test, EventBuilder, JoinedRoomBuilder, StateTestEvent, SyncResponseBuilder, ALICE, BOB, }; @@ -42,12 +42,12 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_sync}; +use crate::mock_sync; #[async_test] async fn test_back_pagination() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -138,7 +138,7 @@ async fn test_back_pagination() { #[async_test] async fn test_back_pagination_highlighted() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -225,7 +225,7 @@ async fn test_back_pagination_highlighted() { #[async_test] async fn test_wait_for_token() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let event_builder = EventBuilder::new(); @@ -286,7 +286,7 @@ async fn test_wait_for_token() { #[async_test] async fn test_dedup() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let event_builder = EventBuilder::new(); @@ -327,22 +327,26 @@ async fn test_dedup() { ); mock_sync(&server, sync_builder.build_json_sync_response(), None).await; + // If I try to paginate twice at the same time, let paginate_1 = async { timeline.paginate_backwards(PaginationOptions::simple_request(10)).await.unwrap(); }; let paginate_2 = async { timeline.paginate_backwards(PaginationOptions::simple_request(10)).await.unwrap(); }; - timeout(Duration::from_secs(2), join(paginate_1, paginate_2)).await.unwrap(); + timeout(Duration::from_secs(5), join(paginate_1, paginate_2)).await.unwrap(); - // Make sure pagination was called (with the right parameters) + // Then only one 
request is actually sent to the server (i.e. the number of + // `expect()`ed requested is indeed 1. + // + // Make sure pagination was called (with the right parameters). server.verify().await; } #[async_test] async fn test_timeline_reset_while_paginating() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let event_builder = EventBuilder::new(); @@ -519,7 +523,7 @@ pub static ROOM_MESSAGES_BATCH_2: Lazy = Lazy::new(|| { #[async_test] async fn test_empty_chunk() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -609,7 +613,7 @@ async fn test_empty_chunk() { #[async_test] async fn test_until_num_items_with_empty_chunk() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/profiles.rs b/crates/matrix-sdk-ui/tests/integration/timeline/profiles.rs index 769348ced5f..fb1eeed577d 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/profiles.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/profiles.rs @@ -15,7 +15,7 @@ use std::{sync::Arc, time::Duration}; use assert_matches::assert_matches; -use matrix_sdk::config::SyncSettings; +use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server}; use matrix_sdk_test::{ async_test, EventBuilder, JoinedRoomBuilder, SyncResponseBuilder, ALICE, BOB, CAROL, DEFAULT_TEST_ROOM_ID, @@ -31,11 +31,11 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_sync}; +use crate::mock_sync; #[async_test] async fn test_update_sender_profiles() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let event_builder = EventBuilder::new(); diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/queue.rs b/crates/matrix-sdk-ui/tests/integration/timeline/queue.rs index c547fb3f040..883b842072b 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/queue.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/queue.rs @@ -18,7 +18,7 @@ use assert_matches::assert_matches; use assert_matches2::assert_let; use eyeball_im::VectorDiff; use futures_util::StreamExt; -use matrix_sdk::config::SyncSettings; +use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server}; use matrix_sdk_test::{async_test, EventBuilder, JoinedRoomBuilder, SyncResponseBuilder, ALICE}; use matrix_sdk_ui::timeline::{EventItemOrigin, EventSendState, RoomExt}; use ruma::{events::room::message::RoomMessageEventContent, room_id}; @@ -30,12 +30,12 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_encryption_state, mock_sync}; +use crate::{mock_encryption_state, mock_sync}; #[async_test] async fn test_message_order() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + 
let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -107,7 +107,7 @@ async fn test_message_order() { #[async_test] async fn test_retry_order() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -213,7 +213,7 @@ async fn test_retry_order() { #[async_test] async fn test_clear_with_echoes() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let event_builder = EventBuilder::new(); diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/read_receipts.rs b/crates/matrix-sdk-ui/tests/integration/timeline/read_receipts.rs index 015842844e9..1f908bcbac8 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/read_receipts.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/read_receipts.rs @@ -18,7 +18,7 @@ use assert_matches::assert_matches; use assert_matches2::assert_let; use eyeball_im::VectorDiff; use futures_util::StreamExt; -use matrix_sdk::{config::SyncSettings, room::Receipts}; +use matrix_sdk::{config::SyncSettings, room::Receipts, test_utils::logged_in_client_with_server}; use matrix_sdk_test::{ async_test, sync_timeline_event, EphemeralTestEvent, JoinedRoomBuilder, RoomAccountDataTestEvent, SyncResponseBuilder, ALICE, BOB, @@ -40,7 +40,7 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_sync}; +use crate::mock_sync; fn filter_notice(ev: &AnySyncTimelineEvent, _room_version: &RoomVersionId) -> bool { match ev { @@ -54,7 +54,7 @@ fn filter_notice(ev: &AnySyncTimelineEvent, _room_version: &RoomVersionId) -> bo #[async_test] async fn test_read_receipts_updates() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let own_user_id = client.user_id().unwrap(); @@ -283,7 +283,7 @@ async fn test_read_receipts_updates() { #[async_test] async fn test_read_receipts_updates_on_filtered_events() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let own_user_id = client.user_id().unwrap(); @@ -495,7 +495,7 @@ async fn test_read_receipts_updates_on_filtered_events() { #[async_test] async fn test_send_single_receipt() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let own_user_id = client.user_id().unwrap(); @@ -842,7 +842,7 @@ async fn test_send_single_receipt() { #[async_test] async fn test_mark_as_read() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = 
SyncSettings::new().timeout(Duration::from_millis(3000)); let own_user_id = client.user_id().unwrap(); @@ -945,7 +945,7 @@ async fn test_mark_as_read() { #[async_test] async fn test_send_multiple_receipts() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let own_user_id = client.user_id().unwrap(); @@ -1153,7 +1153,7 @@ async fn test_send_multiple_receipts() { #[async_test] async fn test_latest_user_read_receipt() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let own_user_id = client.user_id().unwrap(); diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/replies.rs b/crates/matrix-sdk-ui/tests/integration/timeline/replies.rs index c9f90c073dd..43bc2c1e0a7 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/replies.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/replies.rs @@ -4,7 +4,7 @@ use assert_matches::assert_matches; use assert_matches2::assert_let; use eyeball_im::VectorDiff; use futures_util::StreamExt; -use matrix_sdk::config::SyncSettings; +use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server}; use matrix_sdk_base::timeout::timeout; use matrix_sdk_test::{ async_test, EventBuilder, JoinedRoomBuilder, SyncResponseBuilder, ALICE, BOB, CAROL, @@ -31,12 +31,12 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_encryption_state, mock_sync}; +use crate::{mock_encryption_state, mock_sync}; #[async_test] async fn in_reply_to_details() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_builder = EventBuilder::new(); let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); @@ -173,7 +173,7 @@ async fn in_reply_to_details() { #[async_test] async fn transfer_in_reply_to_details_to_re_received_item() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_builder = EventBuilder::new(); let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); @@ -255,7 +255,7 @@ async fn transfer_in_reply_to_details_to_re_received_item() { #[async_test] async fn send_reply() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_builder = EventBuilder::new(); let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); @@ -346,7 +346,7 @@ async fn send_reply() { #[async_test] async fn send_reply_to_threaded() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_builder = EventBuilder::new(); let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/sliding_sync.rs b/crates/matrix-sdk-ui/tests/integration/timeline/sliding_sync.rs index dcdeaad0fc3..12e2c405d0d 100644 --- 
a/crates/matrix-sdk-ui/tests/integration/timeline/sliding_sync.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/sliding_sync.rs @@ -20,7 +20,8 @@ use assert_matches2::assert_let; use eyeball_im::{Vector, VectorDiff}; use futures_util::{pin_mut, FutureExt, Stream, StreamExt}; use matrix_sdk::{ - SlidingSync, SlidingSyncList, SlidingSyncListBuilder, SlidingSyncMode, UpdateSummary, + test_utils::logged_in_client_with_server, SlidingSync, SlidingSyncList, SlidingSyncListBuilder, + SlidingSyncMode, UpdateSummary, }; use matrix_sdk_test::async_test; use matrix_sdk_ui::{ @@ -31,8 +32,6 @@ use ruma::{room_id, user_id, RoomId}; use serde_json::json; use wiremock::{http::Method, Match, Mock, MockServer, Request, ResponseTemplate}; -use crate::logged_in_client; - macro_rules! receive_response { ( [$server:ident, $sliding_sync_stream:ident] @@ -201,7 +200,7 @@ macro_rules! assert_timeline_stream { pub(crate) use assert_timeline_stream; async fn new_sliding_sync(lists: Vec) -> Result<(MockServer, SlidingSync)> { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let mut sliding_sync_builder = client.sliding_sync("integration-test")?; @@ -251,17 +250,23 @@ async fn timeline_test_helper( let sliding_sync_room = sliding_sync.get_room(room_id).await.unwrap(); let room_id = sliding_sync_room.room_id(); - let sdk_room = sliding_sync_room.client().get_room(room_id).ok_or_else(|| { + let client = sliding_sync_room.client(); + let sdk_room = client.get_room(room_id).ok_or_else(|| { anyhow::anyhow!("Room {room_id} not found in client. Can't provide a timeline for it") })?; - let timeline = Timeline::builder(&sdk_room) - .events(sliding_sync_room.prev_batch(), sliding_sync_room.timeline_queue()) - .await - .track_read_marker_and_receipts() - .build() + // TODO: when the event cache handles its own cache, we can remove this. 
+ client + .event_cache() + .add_initial_events( + room_id, + sliding_sync_room.timeline_queue().iter().cloned().collect(), + sliding_sync_room.prev_batch(), + ) .await?; + let timeline = Timeline::builder(&sdk_room).track_read_marker_and_receipts().build().await?; + Ok(timeline.subscribe().await) } diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/subscribe.rs b/crates/matrix-sdk-ui/tests/integration/timeline/subscribe.rs index a06f099b846..0ded3b88859 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/subscribe.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/subscribe.rs @@ -18,7 +18,7 @@ use assert_matches::assert_matches; use assert_matches2::assert_let; use eyeball_im::VectorDiff; use futures_util::{pin_mut, StreamExt}; -use matrix_sdk::config::SyncSettings; +use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server}; use matrix_sdk_test::{ async_test, sync_timeline_event, EventBuilder, GlobalAccountDataTestEvent, JoinedRoomBuilder, SyncResponseBuilder, ALICE, BOB, @@ -35,12 +35,12 @@ use ruma::{ use serde_json::json; use stream_assert::{assert_next_matches, assert_pending}; -use crate::{logged_in_client, mock_sync}; +use crate::mock_sync; #[async_test] async fn test_batched() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let event_builder = EventBuilder::new(); @@ -89,7 +89,7 @@ async fn test_batched() { #[async_test] async fn test_event_filter() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -195,7 +195,7 @@ async fn test_event_filter() { #[async_test] async fn test_timeline_is_reset_when_a_user_is_ignored_or_unignored() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); @@ -339,7 +339,7 @@ async fn test_timeline_is_reset_when_a_user_is_ignored_or_unignored() { #[async_test] async fn test_profile_updates() { let room_id = room_id!("!a98sd12bjh:example.org"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); diff --git a/crates/matrix-sdk/CHANGELOG.md b/crates/matrix-sdk/CHANGELOG.md index 89d8493a928..9cfe0a78819 100644 --- a/crates/matrix-sdk/CHANGELOG.md +++ b/crates/matrix-sdk/CHANGELOG.md @@ -15,6 +15,10 @@ Additions: - Add the `ClientBuilder::add_root_certificates()` method which re-exposes the `reqwest::ClientBuilder::add_root_certificate()` functionality. +- Add `Room::get_user_power_level(user_id)` and `Room::get_suggested_user_role(user_id)` to be able to fetch power level info about a user without loading the room member list. +- Add a new method `discard_room_key` on `Room` that allows discarding the current + outbound session for that room. Can be used by clients as a dev tool like the `/discardsession` command.
+- Add a new `LinkedChunk` data structure to represent all events per room ([#3166](https://github.com/matrix-org/matrix-rust-sdk/pull/3166)). # 0.7.0 @@ -76,6 +80,8 @@ Additions: - Add `Client::subscribe_to_room_updates` and `room::Common::subscribe_to_updates` - Add `Client::rooms_filtered` - Add methods on `Client` that can handle several authentication APIs. +- Add a new method `force_discard_session` on `Room` that allows discarding the current + outbound session (room key) for that room. Can be used by clients for the `/discardsession` command. # 0.6.2 diff --git a/crates/matrix-sdk/Cargo.toml b/crates/matrix-sdk/Cargo.toml index 73b6f54d5ee..85cec662c6f 100644 --- a/crates/matrix-sdk/Cargo.toml +++ b/crates/matrix-sdk/Cargo.toml @@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"] [features] default = ["e2e-encryption", "automatic-room-key-forwarding", "sqlite", "native-tls"] -testing = ["matrix-sdk-sqlite?/testing", "matrix-sdk-indexeddb?/testing", "matrix-sdk-base/testing"] +testing = ["matrix-sdk-sqlite?/testing", "matrix-sdk-indexeddb?/testing", "matrix-sdk-base/testing", "wiremock"] e2e-encryption = [ "matrix-sdk-base/e2e-encryption", @@ -82,7 +82,7 @@ futures-core = { workspace = true } futures-util = { workspace = true } http = { workspace = true } hyper = { version = "0.14.20", features = ["http1", "http2", "server"], optional = true } -imbl = { version = "2.0.0", features = ["serde"] } +imbl = { workspace = true, features = ["serde"] } indexmap = "2.0.2" js_int = "0.2.2" language-tags = { version = "0.3.2", optional = true } @@ -142,6 +142,7 @@ backoff = { version = "0.4.0", features = ["tokio"] } reqwest = { version = "0.11.10", default_features = false, features = ["stream"] } tokio = { workspace = true, features = ["fs", "rt", "macros"] } tokio-util = "0.7.9" +wiremock = { version = "0.5.13", optional = true } [dev-dependencies] anyhow = { workspace = true } @@ -162,7 +163,6 @@ wasm-bindgen-test = "0.3.33" [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } -wiremock = "0.5.13" [[test]] name = "integration" diff --git a/crates/matrix-sdk/src/client/builder.rs b/crates/matrix-sdk/src/client/builder.rs index 54dd1912512..5691d801417 100644 --- a/crates/matrix-sdk/src/client/builder.rs +++ b/crates/matrix-sdk/src/client/builder.rs @@ -38,7 +38,7 @@ use crate::http_client::HttpSettings; use crate::oidc::OidcCtx; use crate::{ authentication::AuthCtx, config::RequestConfig, error::RumaApiError, http_client::HttpClient, - sanitize_server_name, HttpError, + HttpError, IdParseError, }; /// Builder that allows creating and configuring various parts of a [`Client`]. @@ -418,48 +418,8 @@ impl ClientBuilder { } HomeserverConfig::ServerNameOrUrl(server_name_or_url) => { - // Store the result to return at the end. If this doesn't get modified, then the - // supplied name is neither a server name, nor a valid URL. - let mut homeserver_details: Option<( - String, - Option, - )> = None; - let mut discovery_error: Option = None; - - // Attempt discovery as a server name first.
- let sanitize_result = sanitize_server_name(&server_name_or_url); - if let Ok(server_name) = sanitize_result.as_ref() { - let protocol = if server_name_or_url.starts_with("http://") { - UrlScheme::Http - } else { - UrlScheme::Https - }; - - match discover_homeserver(server_name.clone(), protocol, &http_client).await { - Ok(well_known) => { - homeserver_details = - Some((well_known.homeserver.base_url.clone(), Some(well_known))); - } - Err(e) => { - debug!(error = %e, "Well-known discovery failed."); - discovery_error = Some(e); - } - } - } - - // When discovery fails, or the input isn't a valid server name, fallback to - // trying a homeserver URL if supplied. - if homeserver_details.is_none() { - if let Ok(homeserver_url) = Url::parse(&server_name_or_url) { - // Make sure the URL is definitely for a homeserver. - if check_is_homeserver(&homeserver_url, &http_client).await { - homeserver_details = Some((homeserver_url.to_string(), None)); - } - } - } - - homeserver_details - .ok_or(discovery_error.unwrap_or(ClientBuildError::InvalidServerName))? + discover_homeserver_from_server_name_or_url(server_name_or_url, &http_client) + .await? } }; @@ -499,6 +459,7 @@ impl ClientBuilder { oidc: OidcCtx::new(authentication_server_info, allow_insecure_oidc), }); + let event_cache = OnceCell::new(); let inner = ClientInner::new( auth_ctx, homeserver, @@ -507,10 +468,13 @@ impl ClientBuilder { http_client, base_client, self.server_versions, + None, self.respect_login_well_known, + event_cache, #[cfg(feature = "e2e-encryption")] self.encryption_settings, - ); + ) + .await; debug!("Done building the Client"); @@ -518,6 +482,63 @@ impl ClientBuilder { } } +/// Discovers a homeserver from a server name or a URL. +/// +/// Tries well-known discovery and checking if the URL points to a homeserver. +async fn discover_homeserver_from_server_name_or_url( + mut server_name_or_url: String, + http_client: &HttpClient, +) -> Result<(String, Option), ClientBuildError> { + let mut discovery_error: Option = None; + + // Attempt discovery as a server name first. + let sanitize_result = sanitize_server_name(&server_name_or_url); + + if let Ok(server_name) = sanitize_result.as_ref() { + let protocol = if server_name_or_url.starts_with("http://") { + UrlScheme::Http + } else { + UrlScheme::Https + }; + + match discover_homeserver(server_name.clone(), protocol, http_client).await { + Ok(well_known) => { + return Ok((well_known.homeserver.base_url.clone(), Some(well_known))); + } + Err(e) => { + debug!(error = %e, "Well-known discovery failed."); + discovery_error = Some(e); + + // Check if the server name points to a homeserver. + server_name_or_url = match protocol { + UrlScheme::Http => format!("http://{server_name}"), + UrlScheme::Https => format!("https://{server_name}"), + } + } + } + } + + // When discovery fails, or the input isn't a valid server name, fallback to + // trying a homeserver URL. + if let Ok(homeserver_url) = Url::parse(&server_name_or_url) { + // Make sure the URL is definitely for a homeserver. + if check_is_homeserver(&homeserver_url, http_client).await { + return Ok((homeserver_url.to_string(), None)); + } + } + + Err(discovery_error.unwrap_or(ClientBuildError::InvalidServerName)) +} + +/// Creates a server name from a user supplied string. The string is first +/// sanitized by removing whitespace, the http(s) scheme and any trailing +/// slashes before being parsed. 
+pub fn sanitize_server_name(s: &str) -> crate::Result { + ServerName::parse( + s.trim().trim_start_matches("http://").trim_start_matches("https://").trim_end_matches('/'), + ) +} + /// Discovers a homeserver by looking up the well-known at the supplied server /// name. async fn discover_homeserver( @@ -764,6 +785,7 @@ impl ClientBuildError { // The http mocking library is not supported for wasm32 #[cfg(all(test, not(target_arch = "wasm32")))] pub(crate) mod tests { + use assert_matches::assert_matches; use matrix_sdk_test::{async_test, test_json}; use serde_json::{json_internal, Value as JsonValue}; use wiremock::{ @@ -773,8 +795,28 @@ pub(crate) mod tests { use super::*; - // Note: Due to a limitation of the http mocking library these tests all supply an http:// url, - // to `server_name_or_homeserver_url` rather than the plain server name, + #[test] + fn test_sanitize_server_name() { + assert_eq!(sanitize_server_name("matrix.org").unwrap().as_str(), "matrix.org"); + assert_eq!(sanitize_server_name("https://matrix.org").unwrap().as_str(), "matrix.org"); + assert_eq!(sanitize_server_name("http://matrix.org").unwrap().as_str(), "matrix.org"); + assert_eq!( + sanitize_server_name("https://matrix.server.org").unwrap().as_str(), + "matrix.server.org" + ); + assert_eq!( + sanitize_server_name("https://matrix.server.org/").unwrap().as_str(), + "matrix.server.org" + ); + assert_eq!( + sanitize_server_name(" https://matrix.server.org// ").unwrap().as_str(), + "matrix.server.org" + ); + assert_matches!(sanitize_server_name("https://matrix.server.org/something"), Err(_)) + } + + // Note: Due to a limitation of the http mocking library the following tests all + // supply an http:// url, to `server_name_or_homeserver_url` rather than the plain server name, // otherwise the builder will prepend https:// and the request will fail. In practice, this // isn't a problem as the builder first strips the scheme and then checks if the // name is a valid server name, so it is a close enough approximation. @@ -789,7 +831,7 @@ pub(crate) mod tests { let error = builder.build().await.unwrap_err(); // Then the operation should fail due to the invalid server name. - assert!(matches!(error, ClientBuildError::InvalidServerName)); + assert_matches!(error, ClientBuildError::InvalidServerName); } #[async_test] @@ -803,7 +845,7 @@ pub(crate) mod tests { // Then the operation should fail with an HTTP error. println!("{error}"); - assert!(matches!(error, ClientBuildError::Http(_))); + assert_matches!(error, ClientBuildError::Http(_)); } #[async_test] @@ -818,7 +860,7 @@ pub(crate) mod tests { let error = builder.build().await.unwrap_err(); // Then the operation should fail with a server discovery error. - assert!(matches!(error, ClientBuildError::AutoDiscovery(FromHttpResponseError::Server(_)))); + assert_matches!(error, ClientBuildError::AutoDiscovery(FromHttpResponseError::Server(_))); } #[async_test] @@ -829,13 +871,13 @@ pub(crate) mod tests { // When building a client with the server's URL. builder = builder.server_name_or_homeserver_url(homeserver.uri()); - let client = builder.build().await.unwrap(); + let _client = builder.build().await.unwrap(); // Then a client should be built without support for sliding sync or OIDC. 
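The newly public `sanitize_server_name` helper and the refactored discovery path are both exercised through `ClientBuilder::server_name_or_homeserver_url`. A small usage sketch follows; the input strings are illustrative, and the crate-root re-exports are assumed from the `pub use` shown further below.

```rust
use matrix_sdk::{sanitize_server_name, Client, ClientBuildError};

// Normalise whatever the user typed, then let the builder run well-known
// discovery (server name) or fall back to treating the input as a URL.
async fn build_from_user_input(input: &str) -> Result<Client, ClientBuildError> {
    if let Ok(name) = sanitize_server_name(input) {
        // e.g. " https://matrix.server.org// " becomes "matrix.server.org".
        println!("attempting discovery for {name}");
    }

    Client::builder().server_name_or_homeserver_url(input).build().await
}
```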
#[cfg(feature = "experimental-sliding-sync")] - assert!(client.sliding_sync_proxy().is_none()); + assert!(_client.sliding_sync_proxy().is_none()); #[cfg(feature = "experimental-oidc")] - assert!(client.oidc().authentication_server_info().is_none()); + assert!(_client.oidc().authentication_server_info().is_none()); } #[async_test] @@ -851,13 +893,13 @@ pub(crate) mod tests { // When building a client with the server's URL. builder = builder.server_name_or_homeserver_url(homeserver.uri()); - let client = builder.build().await.unwrap(); + let _client = builder.build().await.unwrap(); // Then a client should be built with support for sliding sync. #[cfg(feature = "experimental-sliding-sync")] - assert_eq!(client.sliding_sync_proxy(), Some("https://localhost:1234".parse().unwrap())); + assert_eq!(_client.sliding_sync_proxy(), Some("https://localhost:1234".parse().unwrap())); #[cfg(feature = "experimental-oidc")] - assert!(client.oidc().authentication_server_info().is_none()); + assert!(_client.oidc().authentication_server_info().is_none()); } #[async_test] @@ -880,10 +922,10 @@ pub(crate) mod tests { let error = builder.build().await.unwrap_err(); // Then the operation should fail due to the well-known file's contents. - assert!(matches!( + assert_matches!( error, ClientBuildError::AutoDiscovery(FromHttpResponseError::Deserialization(_)) - )); + ); } #[async_test] @@ -906,13 +948,13 @@ pub(crate) mod tests { // When building a client with the base server. builder = builder.server_name_or_homeserver_url(server.uri()); - let client = builder.build().await.unwrap(); + let _client = builder.build().await.unwrap(); // Then a client should be built without support for sliding sync or OIDC. #[cfg(feature = "experimental-sliding-sync")] - assert!(client.sliding_sync_proxy().is_none()); + assert!(_client.sliding_sync_proxy().is_none()); #[cfg(feature = "experimental-oidc")] - assert!(client.oidc().authentication_server_info().is_none()); + assert!(_client.oidc().authentication_server_info().is_none()); } #[async_test] @@ -935,13 +977,13 @@ pub(crate) mod tests { // When building a client with the base server. builder = builder.server_name_or_homeserver_url(server.uri()); - let client = builder.build().await.unwrap(); + let _client = builder.build().await.unwrap(); // Then a client should be built with support for sliding sync. #[cfg(feature = "experimental-sliding-sync")] - assert_eq!(client.sliding_sync_proxy(), Some("https://localhost:1234".parse().unwrap())); + assert_eq!(_client.sliding_sync_proxy(), Some("https://localhost:1234".parse().unwrap())); #[cfg(feature = "experimental-oidc")] - assert!(client.oidc().authentication_server_info().is_none()); + assert!(_client.oidc().authentication_server_info().is_none()); } #[async_test] @@ -997,14 +1039,14 @@ pub(crate) mod tests { // When building a client with the base server. builder = builder.server_name_or_homeserver_url(server.uri()); - let client = builder.build().await.unwrap(); + let _client = builder.build().await.unwrap(); // Then a client should be built with support for both sliding sync and OIDC. 
#[cfg(feature = "experimental-sliding-sync")] - assert_eq!(client.sliding_sync_proxy(), Some("https://localhost:1234".parse().unwrap())); + assert_eq!(_client.sliding_sync_proxy(), Some("https://localhost:1234".parse().unwrap())); #[cfg(feature = "experimental-oidc")] assert_eq!( - client.oidc().authentication_server_info().unwrap().issuer, + _client.oidc().authentication_server_info().unwrap().issuer, "https://localhost:5678".to_owned() ); } diff --git a/crates/matrix-sdk/src/client/mod.rs b/crates/matrix-sdk/src/client/mod.rs index 87383c6149b..1d81a20e1b7 100644 --- a/crates/matrix-sdk/src/client/mod.rs +++ b/crates/matrix-sdk/src/client/mod.rs @@ -77,6 +77,7 @@ use crate::{ config::RequestConfig, deduplicating_handler::DeduplicatingHandler, error::{HttpError, HttpResult}, + event_cache::EventCache, event_handler::{ EventHandler, EventHandlerDropGuard, EventHandlerHandle, EventHandlerStore, SyncEvent, }, @@ -89,14 +90,14 @@ use crate::{ }; #[cfg(feature = "e2e-encryption")] use crate::{ - encryption::{Encryption, EncryptionData, EncryptionSettings}, + encryption::{Encryption, EncryptionData, EncryptionSettings, VerificationState}, store_locks::CrossProcessStoreLock, }; mod builder; pub(crate) mod futures; -pub use self::builder::{ClientBuildError, ClientBuilder}; +pub use self::builder::{sanitize_server_name, ClientBuildError, ClientBuilder}; #[cfg(not(target_arch = "wasm32"))] type NotificationHandlerFut = Pin + Send>>; @@ -234,6 +235,9 @@ pub(crate) struct ClientInner { /// The Matrix versions the server supports (well-known ones only) server_versions: OnceCell>, + /// The unstable features and their on/off state on the server + unstable_features: OnceCell>, + /// Collection of locks individual client methods might want to use, either /// to ensure that only a single call to a method happens at once or to /// deduplicate multiple calls to a method. @@ -267,9 +271,18 @@ pub(crate) struct ClientInner { /// store. pub(crate) sync_beat: event_listener::Event, + /// A central cache for events, inactive first. + /// + /// It becomes active when [`EventCache::subscribe`] is called. + pub(crate) event_cache: OnceCell, + /// End-to-end encryption related state. #[cfg(feature = "e2e-encryption")] pub(crate) e2ee: EncryptionData, + + /// The verification state of our own device. + #[cfg(feature = "e2e-encryption")] + pub(crate) verification_state: SharedObservable, } impl ClientInner { @@ -279,14 +292,16 @@ impl ClientInner { /// upon instantiation of a sub-client, e.g. a client specialized for /// notifications. 
#[allow(clippy::too_many_arguments)] - fn new( + async fn new( auth_ctx: Arc, homeserver: Url, #[cfg(feature = "experimental-sliding-sync")] sliding_sync_proxy: Option, http_client: HttpClient, base_client: BaseClient, server_versions: Option>, + unstable_features: Option>, respect_login_well_known: bool, + event_cache: OnceCell, #[cfg(feature = "e2e-encryption")] encryption_settings: EncryptionSettings, ) -> Arc { let client = Self { @@ -298,6 +313,7 @@ impl ClientInner { base_client, locks: Default::default(), server_versions: OnceCell::new_with(server_versions), + unstable_features: OnceCell::new_with(unstable_features), typing_notice_times: Default::default(), event_handlers: Default::default(), notification_handlers: Default::default(), @@ -307,8 +323,11 @@ impl ClientInner { room_updates_sender: broadcast::Sender::new(32), respect_login_well_known, sync_beat: event_listener::Event::new(), + event_cache, #[cfg(feature = "e2e-encryption")] e2ee: EncryptionData::new(encryption_settings), + #[cfg(feature = "e2e-encryption")] + verification_state: SharedObservable::new(VerificationState::Unknown), }; #[allow(clippy::let_and_return)] @@ -317,6 +336,8 @@ impl ClientInner { #[cfg(feature = "e2e-encryption")] client.e2ee.initialize_room_key_tasks(&client); + let _ = client.event_cache.get_or_init(|| async { EventCache::new(&client) }).await; + client } } @@ -344,7 +365,7 @@ impl Client { /// Returns a subscriber that publishes an event every time the ignore user /// list changes. - pub fn subscribe_to_ignore_user_list_changes(&self) -> Subscriber<()> { + pub fn subscribe_to_ignore_user_list_changes(&self) -> Subscriber> { self.inner.base_client.subscribe_to_ignore_user_list_changes() } @@ -1160,12 +1181,10 @@ impl Client { /// # Examples /// /// ```no_run - /// use matrix_sdk::Client; - /// - /// # use matrix_sdk::ruma::api::client::room::{ - /// # create_room::v3::Request as CreateRoomRequest, - /// # Visibility, - /// # }; + /// use matrix_sdk::{ + /// ruma::api::client::room::create_room::v3::Request as CreateRoomRequest, + /// Client, + /// }; /// # use url::Url; /// # /// # async { @@ -1393,6 +1412,67 @@ impl Client { Ok(server_versions) } + /// Fetch unstable_features from homeserver + async fn request_unstable_features(&self) -> HttpResult> { + let unstable_features: BTreeMap = self + .inner + .http_client + .send( + get_supported_versions::Request::new(), + None, + self.homeserver().to_string(), + None, + &[MatrixVersion::V1_0], + Default::default(), + ) + .await? + .unstable_features; + + Ok(unstable_features) + } + + /// Get unstable features from `request_unstable_features` or cache + /// + /// # Examples + /// + /// ```no_run + /// # use matrix_sdk::{Client, config::SyncSettings}; + /// # use url::Url; + /// # async { + /// # let homeserver = Url::parse("http://localhost:8080")?; + /// # let mut client = Client::new(homeserver).await?; + /// let unstable_features = client.unstable_features().await?; + /// let msc_x = unstable_features.get("msc_x").unwrap_or(&false); + /// # anyhow::Ok(()) }; + /// ``` + pub async fn unstable_features(&self) -> HttpResult<&BTreeMap> { + let unstable_features = self + .inner + .unstable_features + .get_or_try_init(|| self.request_unstable_features()) + .await?; + + Ok(unstable_features) + } + + /// Check whether MSC 4028 is enabled on the homeserver. 
+ /// + /// # Examples + /// + /// ```no_run + /// # use matrix_sdk::{Client, config::SyncSettings}; + /// # use url::Url; + /// # async { + /// # let homeserver = Url::parse("http://localhost:8080")?; + /// # let mut client = Client::new(homeserver).await?; + /// let msc4028_enabled = + /// client.can_homeserver_push_encrypted_event_to_device().await?; + /// # anyhow::Ok(()) }; + /// ``` + pub async fn can_homeserver_push_encrypted_event_to_device(&self) -> HttpResult { + Ok(self.unstable_features().await?.get("org.matrix.msc4028").copied().unwrap_or(false)) + } + /// Get information of all our own devices. /// /// # Examples @@ -1986,19 +2066,25 @@ impl Client { /// Create a new specialized `Client` that can process notifications. pub async fn notification_client(&self) -> Result { + #[cfg(feature = "experimental-sliding-sync")] + let sliding_sync_proxy = self.inner.sliding_sync_proxy.read().unwrap().clone(); + let client = Client { inner: ClientInner::new( self.inner.auth_ctx.clone(), self.homeserver(), #[cfg(feature = "experimental-sliding-sync")] - self.inner.sliding_sync_proxy.read().unwrap().clone(), + sliding_sync_proxy, self.inner.http_client.clone(), self.inner.base_client.clone_with_in_memory_state_store(), self.inner.server_versions.get().cloned(), + self.inner.unstable_features.get().cloned(), self.inner.respect_login_well_known, + self.inner.event_cache.clone(), #[cfg(feature = "e2e-encryption")] self.inner.e2ee.encryption_settings, - ), + ) + .await, }; // Copy the parent's session meta into the child. This initializes the in-memory @@ -2014,6 +2100,12 @@ impl Client { Ok(client) } + + /// The [`EventCache`] instance for this [`Client`]. + pub fn event_cache(&self) -> &EventCache { + // SAFETY: always initialized in the `Client` ctor. + self.inner.event_cache.get().unwrap() + } } // The http mocking library is not supported for wasm32 @@ -2250,4 +2342,39 @@ pub(crate) mod tests { assert_eq!(result.avatar_url.clone().unwrap().to_string(), "mxc://example.me/someid"); assert!(!response.limited); } + + #[async_test] + async fn test_request_unstable_features() { + let server = MockServer::start().await; + let client = logged_in_client(Some(server.uri())).await; + + Mock::given(method("GET")) + .and(path("_matrix/client/versions")) + .respond_with( + ResponseTemplate::new(200).set_body_json(&*test_json::api_responses::VERSIONS), + ) + .mount(&server) + .await; + let unstable_features = client.request_unstable_features().await.unwrap(); + + assert_eq!(unstable_features.get("org.matrix.e2e_cross_signing"), Some(&true)); + assert_eq!(unstable_features, client.unstable_features().await.unwrap().clone()); + } + + #[async_test] + async fn test_can_homeserver_push_encrypted_event_to_device() { + let server = MockServer::start().await; + let client = logged_in_client(Some(server.uri())).await; + + Mock::given(method("GET")) + .and(path("_matrix/client/versions")) + .respond_with( + ResponseTemplate::new(200).set_body_json(&*test_json::api_responses::VERSIONS), + ) + .mount(&server) + .await; + + let msc4028_enabled = client.can_homeserver_push_encrypted_event_to_device().await.unwrap(); + assert!(msc4028_enabled); + } } diff --git a/crates/matrix-sdk/src/config/request.rs b/crates/matrix-sdk/src/config/request.rs index add9072c102..226d26a99b8 100644 --- a/crates/matrix-sdk/src/config/request.rs +++ b/crates/matrix-sdk/src/config/request.rs @@ -98,7 +98,8 @@ impl RequestConfig { self } - /// The number of times a request should be retried. 
The default is no limit + /// The number of times a request should be retried. The default is no + /// limit. #[must_use] pub fn retry_limit(mut self, retry_limit: u64) -> Self { self.retry_limit = Some(retry_limit); diff --git a/crates/matrix-sdk/src/docs/encryption.md b/crates/matrix-sdk/src/docs/encryption.md index 79145768d1d..0858260e6a3 100644 --- a/crates/matrix-sdk/src/docs/encryption.md +++ b/crates/matrix-sdk/src/docs/encryption.md @@ -1,6 +1,6 @@ End-to-end encryption related types -Matrix has support for end-to-end encrypted messaging, this module contains +Matrix has support for end-to-end encrypted messaging. This module contains types related to end-to-end encryption, describes a bit how E2EE works in the matrix-sdk, and how to set your [`Client`] up to support E2EE. @@ -53,12 +53,12 @@ been introduced. ## Room keys Room keys remove the need to encrypt each message for each *end*. -Instead a room key needs to be shared with each *end*, after that a message +Instead a room key needs to be shared with each *end*, and after that a message can be encrypted in a single, O(1), step. -A room key is backed by a [Megolm] session, which in turn consists two -parts. The first part, the outbound group session is used for encryption, -this one never leaves your device. The second part is the inbound group +A room key is backed by a [Megolm] session, which consists of two +parts. The first part, the outbound group session, is used for encryption. +This part never leaves your device. The second part is the inbound group session, which is shared with each *end*. ```text @@ -83,7 +83,7 @@ comes first. ### Decrypting the room history Since room keys get relatively often rotated, each room key will need to be -stored, otherwise we won't be able to decrypt historical messages. The SDK +stored — otherwise we won't be able to decrypt historical messages. The SDK stores all room keys locally in an encrypted manner. Besides storing them as part of the SDK store, users can export room keys @@ -95,8 +95,8 @@ One important aspect of end-to-end encryption is to check that the *end* you are communicating with is indeed the person you expect. This checking is done in Matrix via interactive verification. While interactively verifying, we'll need to exchange some critical piece of information over another -communication channel, over the phone, or in person are good candidates -for such a channel. +communication channel. (Good ways to make this exchange would be in person or +via a phone call.) Usually each *end* will need to verify every *end* it communicates with. An *end* is represented as a [`Device`] in the matrix-sdk. This gets rather @@ -120,11 +120,11 @@ communication between Alice and Bob to be considered secure. ``` -To simplify things and lower the amount of devices a user needs to verify +To simplify things and lower the amount of devices a user needs to verify, cross signing has been introduced. Cross signing adds a concept of a user identity which is represented in the matrix-sdk using the [`UserIdentity`] -struct. This way Alice and Bob only need to verify their own devices and -each others user identity for the communication to be considered secure. +struct. This way, Alice and Bob only need to verify their own devices and +each other's user identity for the communication to be considered secure. ```text @@ -149,7 +149,7 @@ More info about devices and identities can be found in the [`identities`] module. 
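A hedged sketch of the check described above, assuming the `Encryption::get_user_identity` and `Encryption::get_device` accessors together with their `is_verified()` helpers; see the `identities` and `verification` modules for the authoritative API.

```rust
use matrix_sdk::Client;
use ruma::{DeviceId, UserId};

// With cross signing, verifying the other user's identity (plus our own
// devices) is enough; without it, every single device has to be checked.
async fn is_end_trusted(
    client: &Client,
    user_id: &UserId,
    device_id: &DeviceId,
) -> anyhow::Result<bool> {
    if let Some(identity) = client.encryption().get_user_identity(user_id).await? {
        if identity.is_verified() {
            return Ok(true);
        }
    }

    let device = client.encryption().get_device(user_id, device_id).await?;
    Ok(device.map_or(false, |d| d.is_verified()))
}
```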
To add interactive verification support to your client please see the -[`verification`] module, also check out the documentation for the +[`verification`] module. Also check out the documentation for the [`Device::is_verified()`] method, which explains in more detail what it means for a [`Device`] to be verified. @@ -166,23 +166,23 @@ to work. 1. Make sure the `e2e-encryption` feature is enabled. 2. To persist the encryption keys, you can use [`ClientBuilder::store_config`] - or one of the other `_store` methods on [`ClientBuilder`]. + or one of the `_store` methods on [`ClientBuilder`]. ## Restoring a client -Restoring a Client is relatively easy, still some things need to be kept in -mind before doing so. +Restoring a Client is relatively easy, but there are some things that need to be +kept in mind before doing so. There are two ways one might wish to restore a [`Client`]: 1. Using an access token 2. Using the password -Initially, logging in creates a device ID and access token on the server, -those two are directly connected to each other, more on this relationship +Initially, logging in creates a device ID and access token on the server. +Those two are directly connected to each other — more on this relationship can be found in the [spec]. -After we log in the client will upload the end-to-end encryption related +After we log in, the client will upload the end-to-end encryption related [device keys] to the server. Those device keys cannot be replaced once they have been uploaded and tied to a device ID. @@ -207,7 +207,7 @@ the device ID. This will replace the access token from the previous login call, but won't create a new device. -**Note** that the default store supports only a single device, logging in +**Note** that the default store supports only a single device. Logging in with a different device ID (either `None` or a device ID of another client) is **not** supported using the default store. @@ -215,10 +215,10 @@ is **not** supported using the default store. | Failure | Cause | Fix | | ------------------- | ----- | ----------- | -| No messages get encrypted nor decrypted | The `e2e-encryption` feature is disabled | [Enable the feature in your `Cargo.toml` file] | +| No messages get encrypted or decrypted | The `e2e-encryption` feature is disabled | [Enable the feature in your `Cargo.toml` file] | | Messages that were decryptable aren't after a restart | Storage isn't setup to be persistent | Ensure you've activated the persistent storage backend feature, e.g. `sqlite` | | Messages are encrypted but can't be decrypted | The access token that the client is using is tied to another device | Clear storage to create a new device, read the [Restoring a Client] section | -| Messages don't get encrypted but get decrypted | The `m.room.encryption` event is missing | Make sure encryption is [enabled] for the room and the event isn't [filtered] out, otherwise it might be a deserialization bug | +| Messages don't get encrypted but get decrypted | The `m.room.encryption` event is missing | Make sure encryption is [enabled] for the room and the event isn't [filtered] out. 
Otherwise it might be a deserialization bug | [Enable the feature in your `Cargo.toml` file]: https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#choosing-features [Megolm]: https://gitlab.matrix.org/matrix-org/olm/blob/master/docs/megolm.md diff --git a/crates/matrix-sdk/src/encryption/backups/mod.rs b/crates/matrix-sdk/src/encryption/backups/mod.rs index 45554d90196..0ddab70249c 100644 --- a/crates/matrix-sdk/src/encryption/backups/mod.rs +++ b/crates/matrix-sdk/src/encryption/backups/mod.rs @@ -451,17 +451,28 @@ impl Backups { for (room_id, room_keys) in backed_up_keys.rooms { for (session_id, room_key) in room_keys.sessions { - // TODO: Log that we're skipping some keys here. - let Ok(room_key) = room_key.deserialize() else { - warn!("Couldn't deserialize a room key we downloaded from backups, session ID: {session_id}"); - continue; + let room_key = match room_key.deserialize() { + Ok(k) => k, + Err(e) => { + warn!( + "Couldn't deserialize a room key we downloaded from backups, session \ + ID: {session_id}, error: {e:?}" + ); + continue; + } }; - let Ok(room_key) = - backup_decryption_key.decrypt_session_data(room_key.session_data) - else { - continue; - }; + let room_key = + match backup_decryption_key.decrypt_session_data(room_key.session_data) { + Ok(k) => k, + Err(e) => { + warn!( + "Couldn't decrypt a room key we downloaded from backups, session \ + ID: {session_id}, error: {e:?}" + ); + continue; + } + }; decrypted_room_keys .entry(room_id.to_owned()) diff --git a/crates/matrix-sdk/src/encryption/mod.rs b/crates/matrix-sdk/src/encryption/mod.rs index 762ce132040..5ce64635908 100644 --- a/crates/matrix-sdk/src/encryption/mod.rs +++ b/crates/matrix-sdk/src/encryption/mod.rs @@ -24,7 +24,7 @@ use std::{ sync::{Arc, Mutex as StdMutex}, }; -use eyeball::SharedObservable; +use eyeball::{SharedObservable, Subscriber}; use futures_core::Stream; use futures_util::{ future::try_join, @@ -186,6 +186,35 @@ pub enum BackupDownloadStrategy { Manual, } +/// The verification state of our own device +/// +/// This enum tells us if our own user identity trusts these devices, in other +/// words it tells us if the user identity has signed the device. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VerificationState { + /// The verification state is unknown for now. + Unknown, + /// The device is considered to be verified, it has been signed by its user + /// identity. + Verified, + /// The device is unverified. + Unverified, +} + +/// Wraps together a `CrossProcessLockStoreGuard` and a generation number. +#[derive(Debug)] +pub struct CrossProcessLockStoreGuardWithGeneration { + _guard: CrossProcessStoreLockGuard, + generation: u64, +} + +impl CrossProcessLockStoreGuardWithGeneration { + /// Return the Crypto Store generation associated with this store lock. + pub fn generation(&self) -> u64 { + self.generation + } +} + impl Client { pub(crate) async fn olm_machine(&self) -> RwLockReadGuard<'_, Option> { self.base_client().olm_machine().await @@ -222,7 +251,7 @@ impl Client { let response = self.send(request, None).await?; self.mark_request_as_sent(request_id, &response).await?; - self.encryption().recovery().update_state_after_keys_query(&response).await; + self.encryption().update_state_after_keys_query(&response).await; Ok(response) } @@ -611,6 +640,32 @@ impl Encryption { } } + /// Get a [`Subscriber`] for the [`VerificationState`]. 
+ /// + /// # Examples + /// + /// ```no_run + /// use matrix_sdk::{encryption, Client}; + /// use url::Url; + /// + /// # async { + /// let homeserver = Url::parse("http://example.com")?; + /// let client = Client::new(homeserver).await?; + /// let mut subscriber = client.encryption().verification_state(); + /// + /// let current_value = subscriber.get(); + /// + /// println!("The current verification state is: {current_value:?}"); + /// + /// if let Some(verification_state) = subscriber.next().await { + /// println!("Received verification state update {:?}", verification_state) + /// } + /// # anyhow::Ok(()) }; + /// ``` + pub fn verification_state(&self) -> Subscriber { + self.client.inner.verification_state.subscribe() + } + /// Get a verification object with the given flow id. pub async fn get_verification(&self, user_id: &UserId, flow_id: &str) -> Option { let olm = self.client.olm_machine().await; @@ -1063,7 +1118,7 @@ impl Encryption { let olm = self.client.olm_machine().await; let olm = olm.as_ref().ok_or(Error::NoOlmMachine)?; - let keys = olm.export_room_keys(predicate).await?; + let keys = olm.store().export_room_keys(predicate).await?; let passphrase = zeroize::Zeroizing::new(passphrase.to_owned()); let encrypt = move || -> Result<()> { @@ -1171,7 +1226,7 @@ impl Encryption { if prev_holder == lock_value { return Ok(()); } - warn!("recreating cross-process store lock with a different holder value: prev was {prev_holder}, new is {lock_value}"); + warn!("Recreating cross-process store lock with a different holder value: prev was {prev_holder}, new is {lock_value}"); } let olm_machine = self.client.base_client().olm_machine().await; @@ -1206,21 +1261,30 @@ impl Encryption { /// Maybe reload the `OlmMachine` after acquiring the lock for the first /// time. - async fn on_lock_newly_acquired(&self) -> Result<(), Error> { + /// + /// Returns the current generation number. + async fn on_lock_newly_acquired(&self) -> Result { let olm_machine_guard = self.client.olm_machine().await; if let Some(olm_machine) = olm_machine_guard.as_ref() { - // If the crypto store generation has changed, - if olm_machine + let (new_gen, generation_number) = olm_machine .maintain_crypto_store_generation(&self.client.locks().crypto_store_generation) - .await? - { + .await?; + // If the crypto store generation has changed, + if new_gen { // (get rid of the reference to the current crypto store first) drop(olm_machine_guard); // Recreate the OlmMachine. self.client.base_client().regenerate_olm().await?; } + Ok(generation_number) + } else { + // XXX: not sure this is reachable. Seems like the OlmMachine should always have + // been initialised by the time we get here. Ideally we'd panic, or return an + // error, but for now I'm just adding some logging to check if it + // happens, and returning the magic number 0. 
+ warn!("Encryption::on_lock_newly_acquired: called before OlmMachine initialised"); + Ok(0) } - Ok(()) } /// If a lock was created with [`Self::enable_cross_process_store_lock`], @@ -1231,13 +1295,13 @@ impl Encryption { pub async fn spin_lock_store( &self, max_backoff: Option, - ) -> Result, Error> { + ) -> Result, Error> { if let Some(lock) = self.client.locks().cross_process_crypto_store_lock.get() { let guard = lock.spin_lock(max_backoff).await?; - self.on_lock_newly_acquired().await?; + let generation = self.on_lock_newly_acquired().await?; - Ok(Some(guard)) + Ok(Some(CrossProcessLockStoreGuardWithGeneration { _guard: guard, generation })) } else { Ok(None) } @@ -1247,15 +1311,19 @@ impl Encryption { /// attempts to lock it once. /// /// Returns a guard to the lock, if it was obtained. - pub async fn try_lock_store_once(&self) -> Result, Error> { + pub async fn try_lock_store_once( + &self, + ) -> Result, Error> { if let Some(lock) = self.client.locks().cross_process_crypto_store_lock.get() { let maybe_guard = lock.try_lock_once().await?; - if maybe_guard.is_some() { - self.on_lock_newly_acquired().await?; - } + let Some(guard) = maybe_guard else { + return Ok(None); + }; + + let generation = self.on_lock_newly_acquired().await?; - Ok(maybe_guard) + Ok(Some(CrossProcessLockStoreGuardWithGeneration { _guard: guard, generation })) } else { Ok(None) } @@ -1309,6 +1377,8 @@ impl Encryption { if let Err(e) = this.recovery().setup().await { error!("Couldn't setup and resume recovery {e:?}"); } + + this.update_verification_state().await; })); Ok(()) @@ -1321,7 +1391,43 @@ impl Encryption { if let Some(task) = task { if let Err(err) = task.await { - warn!("error when initializing backups: {err}"); + warn!("Error when initializing backups: {err}"); + } + } + } + + pub(crate) async fn update_state_after_keys_query(&self, response: &get_keys::v3::Response) { + self.recovery().update_state_after_keys_query(response).await; + + // Only update the verification_state if our own devices changed + if let Some(user_id) = self.client.user_id() { + let contains_own_device = response.device_keys.contains_key(user_id); + + if contains_own_device { + self.update_verification_state().await; + } + } + } + + async fn update_verification_state(&self) { + match self.get_own_device().await { + Ok(device) => { + if let Some(device) = device { + let is_verified = device.is_cross_signed_by_owner(); + + if is_verified { + self.client.inner.verification_state.set(VerificationState::Verified); + } else { + self.client.inner.verification_state.set(VerificationState::Unverified); + } + } else { + warn!("Couldn't find out own device in the store."); + self.client.inner.verification_state.set(VerificationState::Unknown); + } + } + Err(error) => { + warn!("Failed retrieving own device: {error}"); + self.client.inner.verification_state.set(VerificationState::Unknown); } } } diff --git a/crates/matrix-sdk/src/event_cache/linked_chunk.rs b/crates/matrix-sdk/src/event_cache/linked_chunk.rs new file mode 100644 index 00000000000..f253c2c8133 --- /dev/null +++ b/crates/matrix-sdk/src/event_cache/linked_chunk.rs @@ -0,0 +1,1366 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![allow(dead_code)] + +use std::{ + fmt, + marker::PhantomData, + ops::Not, + ptr::NonNull, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; + +/// Errors of [`LinkedChunk`]. +#[derive(Debug)] +pub enum LinkedChunkError { + InvalidChunkIdentifier { identifier: ChunkIdentifier }, + ChunkIsAGap { identifier: ChunkIdentifier }, + ChunkIsItems { identifier: ChunkIdentifier }, + InvalidItemIndex { index: usize }, +} + +/// The [`LinkedChunk`] structure. +/// +/// It is similar to a linked list, except that it contains many items instead +/// of a single one. A chunk has a maximum capacity of `CAPACITY`. Once a chunk +/// is full, a new chunk is created. Not all chunks are necessarily entirely +/// full. +pub struct LinkedChunk { + /// The first chunk. + first: NonNull>, + /// The last chunk. + last: Option>>, + /// The number of items hold by this linked chunk. + length: usize, + /// The generator of chunk identifiers. + chunk_identifier_generator: ChunkIdentifierGenerator, + /// Marker. + marker: PhantomData>>, +} + +impl LinkedChunk { + /// Create a new [`Self`]. + pub fn new() -> Self { + Self { + // INVARIANT: The first chunk must always be an Items, not a Gap. + first: Chunk::new_items_leaked(ChunkIdentifierGenerator::FIRST_IDENTIFIER), + last: None, + length: 0, + chunk_identifier_generator: ChunkIdentifierGenerator::new_from_scratch(), + marker: PhantomData, + } + } + + /// Get the number of items in this linked chunk. + pub fn len(&self) -> usize { + self.length + } + + /// Push items at the end of the [`LinkedChunk`], i.e. on the last + /// chunk. + /// + /// If the last chunk doesn't have enough space to welcome all `items`, + /// then new chunks can be created (and linked appropriately). + pub fn push_items_back(&mut self, items: I) + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + let items = items.into_iter(); + let number_of_items = items.len(); + + let chunk_identifier_generator = self.chunk_identifier_generator.clone(); + + let last_chunk = self.latest_chunk_mut(); + + // Push the items. + let last_chunk = last_chunk.push_items(items, &chunk_identifier_generator); + + debug_assert!(last_chunk.is_last_chunk(), "`last_chunk` must be… the last chunk"); + + // We need to update `self.last` if and only if `last_chunk` _is not_ the first + // chunk, and _is_ the last chunk (ensured by the `debug_assert!` above). + if last_chunk.is_first_chunk().not() { + // Maybe `last_chunk` is the same as the previous `self.last` chunk, but it's + // OK. + self.last = Some(NonNull::from(last_chunk)); + } + + self.length += number_of_items; + } + + /// Push a gap at the end of the [`LinkedChunk`], i.e. after the last + /// chunk. + pub fn push_gap_back(&mut self) { + let next_identifier = self.chunk_identifier_generator.generate_next().unwrap(); + + let last_chunk = self.latest_chunk_mut(); + last_chunk.insert_next(Chunk::new_gap_leaked(next_identifier)); + + self.last = last_chunk.next; + } + + /// Insert items at a specified position in the [`LinkedChunk`]. + /// + /// Because the `position` can be invalid, this method returns a + /// `Result`. 
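To make the push behaviour described above concrete, here is a rough usage sketch in the spirit of this module's own unit tests (not part of the diff; `LinkedChunk` is internal to the SDK, and the `<item type, capacity>` generic order is assumed from those tests):

```rust
// Editor's sketch, not part of the patch: exercising `push_items_back` and
// `push_gap_back` on a linked chunk whose chunks hold at most 3 items.
let mut linked_chunk = LinkedChunk::<char, 3>::new();

// Pushing more items than the remaining capacity links new chunks
// automatically; the layout becomes ['a', 'b', 'c'] ['d', 'e'].
linked_chunk.push_items_back(['a', 'b', 'c', 'd', 'e']);
assert_eq!(linked_chunk.len(), 5);

// A gap is an empty marker chunk appended after the last chunk:
// ['a', 'b', 'c'] ['d', 'e'] [-].
linked_chunk.push_gap_back();

// Gaps hold no items, so the length is unchanged.
assert_eq!(linked_chunk.len(), 5);
```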
+ pub fn insert_items_at( + &mut self, + items: I, + position: ItemPosition, + ) -> Result<(), LinkedChunkError> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + let chunk_identifier = position.chunk_identifier(); + let item_index = position.item_index(); + + let chunk_identifier_generator = self.chunk_identifier_generator.clone(); + + let chunk = self + .chunk_mut(chunk_identifier) + .ok_or(LinkedChunkError::InvalidChunkIdentifier { identifier: chunk_identifier })?; + + let (chunk, number_of_items) = match &mut chunk.content { + ChunkContent::Gap => { + return Err(LinkedChunkError::ChunkIsAGap { identifier: chunk_identifier }) + } + ChunkContent::Items(current_items) => { + let current_items_length = current_items.len(); + + if item_index >= current_items_length { + return Err(LinkedChunkError::InvalidItemIndex { index: item_index }); + } + + // The `ItemPosition` is computed from the latest items. Here, we manipulate the + // items in their original order: the last item comes last. Let's adjust + // `item_index`. + let item_index = current_items_length - 1 - item_index; + + // Split the items. + let detached_items = current_items.split_off(item_index); + + // Prepare the items to be pushed. + let items = items.into_iter(); + let number_of_items = items.len(); + + ( + chunk + // Push the new items. + .push_items(items, &chunk_identifier_generator) + // Finally, push the items that have been detached. + .push_items(detached_items.into_iter(), &chunk_identifier_generator), + number_of_items, + ) + } + }; + + // We need to update `self.last` if and only if `chunk` _is not_ the first + // chunk, and _is_ the last chunk. + if chunk.is_first_chunk().not() && chunk.is_last_chunk() { + // Maybe `chunk` is the same as the previous `self.last` chunk, but it's + // OK. + self.last = Some(NonNull::from(chunk)); + } + + self.length += number_of_items; + + Ok(()) + } + + /// Insert a gap at a specified position in the [`LinkedChunk`]. + /// + /// Because the `position` can be invalid, this method returns a + /// `Result`. + pub fn insert_gap_at(&mut self, position: ItemPosition) -> Result<(), LinkedChunkError> { + let chunk_identifier = position.chunk_identifier(); + let item_index = position.item_index(); + + let chunk_identifier_generator = self.chunk_identifier_generator.clone(); + + let chunk = self + .chunk_mut(chunk_identifier) + .ok_or(LinkedChunkError::InvalidChunkIdentifier { identifier: chunk_identifier })?; + + let chunk = match &mut chunk.content { + ChunkContent::Gap => { + return Err(LinkedChunkError::ChunkIsAGap { identifier: chunk_identifier }); + } + ChunkContent::Items(current_items) => { + let current_items_length = current_items.len(); + + if item_index >= current_items_length { + return Err(LinkedChunkError::InvalidItemIndex { index: item_index }); + } + + // The `ItemPosition` is computed from the latest items. Here, we manipulate the + // items in their original order: the last item comes last. Let's adjust + // `item_index`. + let item_index = current_items_length - 1 - item_index; + + // Split the items. + let detached_items = current_items.split_off(item_index); + + chunk + // Insert a new gap chunk. + .insert_next(Chunk::new_gap_leaked( + chunk_identifier_generator.generate_next().unwrap(), + )) + // Insert a new items chunk. + .insert_next(Chunk::new_items_leaked( + chunk_identifier_generator.generate_next().unwrap(), + )) + // Finally, push the items that have been detached. 
+ .push_items(detached_items.into_iter(), &chunk_identifier_generator) + } + }; + + // We need to update `self.last` if and only if `chunk` _is not_ the first + // chunk, and _is_ the last chunk. + if chunk.is_first_chunk().not() && chunk.is_last_chunk() { + // Maybe `chunk` is the same as the previous `self.last` chunk, but it's + // OK. + self.last = Some(NonNull::from(chunk)); + } + + Ok(()) + } + + /// Replace the gap identified by `chunk_identifier`, by items. + /// + /// Because the `chunk_identifier` can represent non-gap chunk, this method + /// returns a `Result`. + pub fn replace_gap_at( + &mut self, + items: I, + chunk_identifier: ChunkIdentifier, + ) -> Result<(), LinkedChunkError> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + let chunk_identifier_generator = self.chunk_identifier_generator.clone(); + let chunk_ptr; + + { + let chunk = self + .chunk_mut(chunk_identifier) + .ok_or(LinkedChunkError::InvalidChunkIdentifier { identifier: chunk_identifier })?; + + debug_assert!(chunk.is_first_chunk().not(), "A gap cannot be the first chunk"); + + let (previous, number_of_items) = match &mut chunk.content { + ChunkContent::Gap => { + let items = items.into_iter(); + let number_of_items = items.len(); + + // Find the previous chunk… + // + // SAFETY: `unwrap` is safe because we are ensured `chunk` is not the first + // chunk, so a previous chunk always exists. + let previous = chunk.previous_mut().unwrap(); + + // … and insert the items on it. + (previous.push_items(items, &chunk_identifier_generator), number_of_items) + } + ChunkContent::Items(..) => { + return Err(LinkedChunkError::ChunkIsItems { identifier: chunk_identifier }) + } + }; + + // Get the pointer to `chunk` via `previous`. + // + // SAFETY: `unwrap` is safe because we are ensured the next of the previous + // chunk is `chunk` itself. + chunk_ptr = previous.next.unwrap(); + + // Get the pointer to the `previous` via `chunk`. + let previous_ptr = chunk.previous; + + // Now that new items have been pushed, we can unlink the gap chunk. + chunk.unlink(); + + // Update `self.last` if the gap chunk was the last chunk. + if chunk.is_last_chunk() { + self.last = previous_ptr; + } + + self.length += number_of_items; + + // Stop borrowing `chunk`. + } + + // Re-box the chunk, and let Rust does its job. + // + // SAFETY: `chunk` is unlinked but it still exists in memory! We have its + // pointer, which is valid and well aligned. + let _chunk_boxed = unsafe { Box::from_raw(chunk_ptr.as_ptr()) }; + + Ok(()) + } + + /// Get the chunk as a reference, from its identifier, if it exists. + fn chunk(&self, identifier: ChunkIdentifier) -> Option<&Chunk> { + let mut chunk = self.latest_chunk(); + + loop { + if chunk.identifier() == identifier { + return Some(chunk); + } + + chunk = chunk.previous()?; + } + } + + /// Get the chunk as a mutable reference, from its identifier, if it exists. + fn chunk_mut(&mut self, identifier: ChunkIdentifier) -> Option<&mut Chunk> { + let mut chunk = self.latest_chunk_mut(); + + loop { + if chunk.identifier() == identifier { + return Some(chunk); + } + + chunk = chunk.previous_mut()?; + } + } + + /// Search for a chunk, and return its identifier. + pub fn chunk_identifier<'a, P>(&'a self, mut predicate: P) -> Option + where + P: FnMut(&'a Chunk) -> bool, + { + self.rchunks().find_map(|chunk| predicate(chunk).then_some(chunk.identifier())) + } + + /// Search for an item, and return its position. 
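Gaps exist so that a later backpagination can fill the hole they mark; a hedged sketch of that flow, again written test-style against the crate-internal API and not part of the diff:

```rust
// Editor's sketch, not part of the patch: marking a hole with a gap, then
// replacing the gap once the missing items have been fetched.
let mut linked_chunk = LinkedChunk::<char, 3>::new();
linked_chunk.push_items_back(['a', 'b', 'c']);
linked_chunk.push_gap_back();
linked_chunk.push_items_back(['l', 'm', 'n']);
// Layout: ['a', 'b', 'c'] [-] ['l', 'm', 'n'].

// Find the gap's identifier with a chunk predicate…
let gap_id = linked_chunk.chunk_identifier(Chunk::is_gap).expect("a gap was pushed above");

// …and replace it with the items that fill the hole.
// Layout: ['a', 'b', 'c'] ['d', 'e', 'f'] ['g', 'h'] ['l', 'm', 'n'].
linked_chunk.replace_gap_at(['d', 'e', 'f', 'g', 'h'], gap_id).unwrap();

// Individual items can then be located by value.
assert!(linked_chunk.item_position(|item| *item == 'h').is_some());
```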
+ pub fn item_position<'a, P>(&'a self, mut predicate: P) -> Option + where + P: FnMut(&'a T) -> bool, + { + self.ritems().find_map(|(item_position, item)| predicate(item).then_some(item_position)) + } + + /// Iterate over the chunks, backward. + /// + /// It iterates from the last to the first chunk. + pub fn rchunks(&self) -> LinkedChunkIterBackward<'_, T, C> { + self.rchunks_from(self.latest_chunk().identifier()) + .expect("`iter_chunks_from` cannot fail because at least one empty chunk must exist") + } + + /// Iterate over the chunks, starting from `identifier`, backward. + /// + /// It iterates from the chunk with the identifier `identifier` to the first + /// chunk. + pub fn rchunks_from( + &self, + identifier: ChunkIdentifier, + ) -> Result, LinkedChunkError> { + Ok(LinkedChunkIterBackward::new( + self.chunk(identifier) + .ok_or(LinkedChunkError::InvalidChunkIdentifier { identifier })?, + )) + } + + /// Iterate over the chunks, starting from `position`, forward. + /// + /// It iterates from the chunk with the identifier `identifier` to the last + /// chunk. + pub fn chunks_from( + &self, + identifier: ChunkIdentifier, + ) -> Result, LinkedChunkError> { + Ok(LinkedChunkIter::new( + self.chunk(identifier) + .ok_or(LinkedChunkError::InvalidChunkIdentifier { identifier })?, + )) + } + + /// Iterate over the items, backward. + /// + /// It iterates from the last to the first item. + pub fn ritems(&self) -> impl Iterator { + self.ritems_from(ItemPosition(self.latest_chunk().identifier(), 0)) + .expect("`iter_items_from` cannot fail because at least one empty chunk must exist") + } + + /// Iterate over the items, starting from `position`, backward. + /// + /// It iterates from the item at `position` to the first item. + pub fn ritems_from( + &self, + position: ItemPosition, + ) -> Result, LinkedChunkError> { + Ok(self + .rchunks_from(position.chunk_identifier())? + .filter_map(|chunk| match &chunk.content { + ChunkContent::Gap => None, + ChunkContent::Items(items) => { + Some(items.iter().rev().enumerate().map(move |(item_index, item)| { + (ItemPosition(chunk.identifier(), item_index), item) + })) + } + }) + .flatten() + .skip(position.item_index())) + } + + /// Iterate over the items, starting from `position`, forward. + /// + /// It iterates from the item at `position` to the last item. + pub fn items_from( + &self, + position: ItemPosition, + ) -> Result, LinkedChunkError> { + Ok(self + .chunks_from(position.chunk_identifier())? + .filter_map(|chunk| match &chunk.content { + ChunkContent::Gap => None, + ChunkContent::Items(items) => { + Some(items.iter().rev().enumerate().rev().map(move |(item_index, item)| { + (ItemPosition(chunk.identifier(), item_index), item) + })) + } + }) + .flatten()) + } + + /// Get the latest chunk, as an immutable reference. + fn latest_chunk(&self) -> &Chunk { + unsafe { self.last.unwrap_or(self.first).as_ref() } + } + + /// Get the latest chunk, as a mutable reference. + fn latest_chunk_mut(&mut self) -> &mut Chunk { + unsafe { self.last.as_mut().unwrap_or(&mut self.first).as_mut() } + } +} + +impl Drop for LinkedChunk { + fn drop(&mut self) { + // Take the latest chunk. + let mut current_chunk_ptr = self.last.or(Some(self.first)); + + // As long as we have another chunk… + while let Some(chunk_ptr) = current_chunk_ptr { + // Disconnect the chunk by updating `previous_chunk.next` pointer. 
+ let previous_ptr = unsafe { chunk_ptr.as_ref() }.previous; + + if let Some(mut previous_ptr) = previous_ptr { + unsafe { previous_ptr.as_mut() }.next = None; + } + + // Re-box the chunk, and let Rust does its job. + let _chunk_boxed = unsafe { Box::from_raw(chunk_ptr.as_ptr()) }; + + // Update the `current_chunk_ptr`. + current_chunk_ptr = previous_ptr; + } + + // At this step, all chunks have been dropped, including + // `self.first`. + } +} + +/// A [`LinkedChunk`] can be safely sent over thread boundaries if `T: Send`. +/// The only unsafe part if around the `NonNull`, but the API and the lifetimes +/// to deref them are designed safely. +unsafe impl Send for LinkedChunk {} + +/// A [`LinkedChunk`] can be safely share between threads if `T: Sync`. +/// The only unsafe part if around the `NonNull`, but the API and the lifetimes +/// to deref them are designed safely. +unsafe impl Sync for LinkedChunk {} + +/// Generator for [`Chunk`]'s identifier. +/// +/// Each [`Chunk`] has a unique identifier. This generator generates the unique +/// identifiers. +/// +/// In order to keep good performance, a unique identifier is simply a `u64` +/// (see [`ChunkIdentifier`]). Generating a new unique identifier boils down to +/// incrementing by one the previous identifier. Note that this is not an index: +/// it _is_ an identifier. +/// +/// Cloning this type is shallow, and thus cheap. +#[derive(Clone)] +struct ChunkIdentifierGenerator { + next: Arc, +} + +impl ChunkIdentifierGenerator { + /// The first identifier. + const FIRST_IDENTIFIER: ChunkIdentifier = ChunkIdentifier(0); + + /// Create the generator assuming the current [`LinkedChunk`] it belongs to + /// is empty. + pub fn new_from_scratch() -> Self { + Self { next: Arc::new(AtomicU64::new(Self::FIRST_IDENTIFIER.0)) } + } + + /// Create the generator assuming the current [`LinkedChunk`] it belongs to + /// is not empty, i.e. it already has some [`Chunk`] in it. + pub fn new_from_previous_chunk_identifier(last_chunk_identifier: ChunkIdentifier) -> Self { + Self { next: Arc::new(AtomicU64::new(last_chunk_identifier.0)) } + } + + /// Generate the next unique identifier. + /// + /// Note that it can fail if there is no more unique identifier available. + /// In this case, `Result::Err` contains the previous unique identifier. + pub fn generate_next(&self) -> Result { + let previous = self.next.fetch_add(1, Ordering::Relaxed); + let current = self.next.load(Ordering::Relaxed); + + // Check for overflows. + // unlikely — TODO: call `std::intrinsics::unlikely` once it's stable. + if current < previous { + return Err(ChunkIdentifier(previous)); + } + + Ok(ChunkIdentifier(current)) + } +} + +/// The unique identifier of a chunk in a [`LinkedChunk`]. +/// +/// It is not the position of the chunk, just its unique identifier. +/// +/// Learn more with [`ChunkIdentifierGenerator`]. +#[derive(Copy, Clone, Debug, PartialEq)] +#[repr(transparent)] +pub struct ChunkIdentifier(u64); + +/// The position of an item in a [`LinkedChunk`]. +/// +/// It's a pair of a chunk position and an item index. `(…, 0)` represents +/// the last item in the chunk. +#[derive(Debug, PartialEq)] +pub struct ItemPosition(ChunkIdentifier, usize); + +impl ItemPosition { + /// Get the chunk identifier of the item. + pub fn chunk_identifier(&self) -> ChunkIdentifier { + self.0 + } + + /// Get the item index inside its chunk. + pub fn item_index(&self) -> usize { + self.1 + } +} + +/// An iterator over a [`LinkedChunk`] that traverses the chunk in backward +/// direction (i.e. 
it calls `previous` on each chunk to make progress). +pub struct LinkedChunkIterBackward<'a, T, const CHUNK_CAPACITY: usize> { + chunk: Option<&'a Chunk>, +} + +impl<'a, T, const C: usize> LinkedChunkIterBackward<'a, T, C> { + /// Create a new [`LinkedChunkIter`] from a particular [`Chunk`]. + fn new(from_chunk: &'a Chunk) -> Self { + Self { chunk: Some(from_chunk) } + } +} + +impl<'a, T, const C: usize> Iterator for LinkedChunkIterBackward<'a, T, C> { + type Item = &'a Chunk; + + fn next(&mut self) -> Option { + self.chunk.map(|chunk| { + self.chunk = chunk.previous(); + + chunk + }) + } +} + +/// An iterator over a [`LinkedChunk`] that traverses the chunk in forward +/// direction (i.e. it calls `next` on each chunk to make progress). +pub struct LinkedChunkIter<'a, T, const CHUNK_CAPACITY: usize> { + chunk: Option<&'a Chunk>, +} + +impl<'a, T, const C: usize> LinkedChunkIter<'a, T, C> { + /// Create a new [`LinkedChunkIter`] from a particular [`Chunk`]. + fn new(from_chunk: &'a Chunk) -> Self { + Self { chunk: Some(from_chunk) } + } +} + +impl<'a, T, const C: usize> Iterator for LinkedChunkIter<'a, T, C> { + type Item = &'a Chunk; + + fn next(&mut self) -> Option { + self.chunk.map(|chunk| { + self.chunk = chunk.next(); + + chunk + }) + } +} + +/// This enum represents the content of a [`Chunk`]. +#[derive(Debug)] +pub enum ChunkContent { + /// The chunk represents a gap in the linked chunk, i.e. a hole. It + /// means that some items are missing in this location. + Gap, + + /// The chunk contains items. + Items(Vec), +} + +/// A chunk is a node in the [`LinkedChunk`]. +pub struct Chunk { + /// The previous chunk. + previous: Option>>, + + /// The next chunk. + next: Option>>, + + /// Unique identifier. + identifier: ChunkIdentifier, + + /// The content of the chunk. + content: ChunkContent, +} + +impl Chunk { + /// Create a new gap chunk. + fn new_gap(identifier: ChunkIdentifier) -> Self { + Self::new(identifier, ChunkContent::Gap) + } + + /// Create a new items chunk. + fn new_items(identifier: ChunkIdentifier) -> Self { + Self::new(identifier, ChunkContent::Items(Vec::with_capacity(CAPACITY))) + } + + fn new(identifier: ChunkIdentifier, content: ChunkContent) -> Self { + Self { previous: None, next: None, identifier, content } + } + + /// Create a new gap chunk, but box it and leak it. + fn new_gap_leaked(identifier: ChunkIdentifier) -> NonNull { + let chunk = Self::new_gap(identifier); + let chunk_box = Box::new(chunk); + + NonNull::from(Box::leak(chunk_box)) + } + + /// Create a new items chunk, but box it and leak it. + fn new_items_leaked(identifier: ChunkIdentifier) -> NonNull { + let chunk = Self::new_items(identifier); + let chunk_box = Box::new(chunk); + + NonNull::from(Box::leak(chunk_box)) + } + + /// Check whether this current chunk is a gap chunk. + fn is_gap(&self) -> bool { + matches!(self.content, ChunkContent::Gap) + } + + /// Check whether this current chunk is an items chunk. + fn is_items(&self) -> bool { + !self.is_gap() + } + + /// Check whether this current chunk is the first chunk. + fn is_first_chunk(&self) -> bool { + self.previous.is_none() + } + + /// Check whether this current chunk is the last chunk. + fn is_last_chunk(&self) -> bool { + self.next.is_none() + } + + /// Get the unique identifier of the chunk. + fn identifier(&self) -> ChunkIdentifier { + self.identifier + } + + /// The length of the chunk, i.e. how many items are in it. + /// + /// It will always return 0 if it's a gap chunk. 
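The distinction between identifiers and positions is easy to get wrong: an identifier is a stable name for a chunk, while an `ItemPosition` indexes items from the end of their chunk. A small illustrative sketch (not part of the diff), mirroring assertions from the tests further down:

```rust
// Editor's sketch, not part of the patch: identifiers are names, not indices.
// A fresh generator hands out 1, 2, 3, …; 0 names the initial items chunk.
let generator = ChunkIdentifierGenerator::new_from_scratch();
assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(1)));
assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(2)));

// An `ItemPosition` pairs a chunk identifier with an item index, where index
// 0 designates the *last* item of that chunk.
let mut linked_chunk = LinkedChunk::<char, 3>::new();
linked_chunk.push_items_back(['a', 'b']);
assert_eq!(
    linked_chunk.item_position(|item| *item == 'b'),
    Some(ItemPosition(ChunkIdentifier(0), 0)),
);
assert_eq!(
    linked_chunk.item_position(|item| *item == 'a'),
    Some(ItemPosition(ChunkIdentifier(0), 1)),
);
```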
+ fn len(&self) -> usize { + match &self.content { + ChunkContent::Gap => 0, + ChunkContent::Items(items) => items.len(), + } + } + + /// Push items on the current chunk. + /// + /// If the chunk doesn't have enough spaces to welcome `new_items`, new + /// chunk will be inserted next, and correctly linked. + /// + /// This method returns the last inserted chunk if any, or the current + /// chunk. Basically, it returns the chunk onto which new computations + /// must happen. + /// + /// Pushing items will always create new chunks if necessary, but it + /// will never merge them, so that we avoid updating too much chunks. + fn push_items( + &mut self, + mut new_items: I, + chunk_identifier_generator: &ChunkIdentifierGenerator, + ) -> &mut Self + where + I: Iterator + ExactSizeIterator, + { + let number_of_new_items = new_items.len(); + let chunk_length = self.len(); + + // A small optimisation. Skip early if there is no new items. + if number_of_new_items == 0 { + return self; + } + + match &mut self.content { + // Cannot push items on a `Gap`. Let's insert a new `Items` chunk to push the + // items onto it. + ChunkContent::Gap => { + self + // Insert a new items chunk. + .insert_next(Self::new_items_leaked( + chunk_identifier_generator.generate_next().unwrap(), + )) + // Now push the new items on the next chunk, and return the result of + // `push_items`. + .push_items(new_items, chunk_identifier_generator) + } + + ChunkContent::Items(items) => { + // Calculate the free space of the current chunk. + let free_space = CAPACITY.saturating_sub(chunk_length); + + // There is enough space to push all the new items. + if number_of_new_items <= free_space { + items.extend(new_items); + + // Return the current chunk. + self + } else { + if free_space > 0 { + // Take all possible items to fill the free space. + items.extend(new_items.by_ref().take(free_space)); + } + + self + // Insert a new items chunk. + .insert_next(Self::new_items_leaked( + chunk_identifier_generator.generate_next().unwrap(), + )) + // Now push the rest of the new items on the next chunk, and return the + // result of `push_items`. + .push_items(new_items, chunk_identifier_generator) + } + } + } + } + + /// Insert a new chunk after the current one. + /// + /// The respective [`Self::previous`] and [`Self::next`] of the current + /// and new chunk will be updated accordingly. + fn insert_next(&mut self, mut new_chunk_ptr: NonNull) -> &mut Chunk { + let new_chunk = unsafe { new_chunk_ptr.as_mut() }; + + // Update the next chunk if any. + if let Some(next_chunk) = self.next_mut() { + // Link back to the new chunk. + next_chunk.previous = Some(new_chunk_ptr); + + // Link the new chunk to the next chunk. + new_chunk.next = self.next; + } + + // Link to the new chunk. + self.next = Some(new_chunk_ptr); + // Link the new chunk to this one. + new_chunk.previous = Some(NonNull::from(self)); + + new_chunk + } + + /// Unlink this chunk. + /// + /// Be careful: `self` won't belong to `LinkedChunk` anymore, and should be + /// dropped appropriately. + fn unlink(&mut self) { + let previous_ptr = self.previous; + let next_ptr = self.next; + + if let Some(previous) = self.previous_mut() { + previous.next = next_ptr; + } + + if let Some(next) = self.next_mut() { + next.previous = previous_ptr; + } + } + + /// Get a reference to the previous chunk if any. + fn previous(&self) -> Option<&Self> { + self.previous.map(|non_null| unsafe { non_null.as_ref() }) + } + + /// Get a mutable to the previous chunk if any. 
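Because chunks are split but never merged, inserting into the middle of a full chunk leaves several partially filled chunks behind; a sketch of that behaviour (not part of the diff), matching the expectations of `test_insert_items_at` below:

```rust
// Editor's sketch, not part of the patch: inserting mid-chunk splits the chunk
// and links new ones, rather than shifting every following item.
let mut linked_chunk = LinkedChunk::<char, 3>::new();
linked_chunk.push_items_back(['a', 'b', 'c', 'd', 'e', 'f']);
// Layout: ['a', 'b', 'c'] ['d', 'e', 'f'].

let position_of_e = linked_chunk.item_position(|item| *item == 'e').unwrap();
linked_chunk.insert_items_at(['w', 'x', 'y', 'z'], position_of_e).unwrap();

// Layout: ['a', 'b', 'c'] ['d', 'w', 'x'] ['y', 'z', 'e'] ['f'].
// The partially filled chunks are never merged back together.
assert_eq!(linked_chunk.len(), 10);
```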
+ fn previous_mut(&mut self) -> Option<&mut Self> { + self.previous.as_mut().map(|non_null| unsafe { non_null.as_mut() }) + } + + /// Get a reference to the next chunk if any. + fn next(&self) -> Option<&Self> { + self.next.map(|non_null| unsafe { non_null.as_ref() }) + } + + /// Get a mutable reference to the next chunk if any. + fn next_mut(&mut self) -> Option<&mut Self> { + self.next.as_mut().map(|non_null| unsafe { non_null.as_mut() }) + } +} + +impl fmt::Debug for LinkedChunk +where + T: fmt::Debug, +{ + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + formatter + .debug_struct("LinkedChunk") + .field("first (deref)", unsafe { self.first.as_ref() }) + .field("last", &self.last) + .field("length", &self.length) + .finish() + } +} + +impl fmt::Debug for Chunk +where + T: fmt::Debug, +{ + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + formatter + .debug_struct("Chunk") + .field("identifier", &self.identifier) + .field("content", &self.content) + .field("previous", &self.previous) + .field("ptr", &std::ptr::from_ref(self)) + .field("next", &self.next) + .field("next (deref)", &self.next.as_ref().map(|non_null| unsafe { non_null.as_ref() })) + .finish() + } +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + + use super::{ + Chunk, ChunkContent, ChunkIdentifier, ChunkIdentifierGenerator, ItemPosition, LinkedChunk, + LinkedChunkError, + }; + + macro_rules! assert_items_eq { + ( @_ [ $iterator:ident, $chunk_index:ident, $item_index:ident ] { [-] $( $rest:tt )* } { $( $accumulator:tt )* } ) => { + assert_items_eq!( + @_ + [ $iterator, $chunk_index, $item_index ] + { $( $rest )* } + { + $( $accumulator )* + $chunk_index += 1; + } + ) + }; + + ( @_ [ $iterator:ident, $chunk_index:ident, $item_index:ident ] { [ $( $item:expr ),* ] $( $rest:tt )* } { $( $accumulator:tt )* } ) => { + assert_items_eq!( + @_ + [ $iterator, $chunk_index, $item_index ] + { $( $rest )* } + { + $( $accumulator )* + let _expected_chunk_identifier = $iterator .peek().unwrap().1.chunk_identifier(); + $( + assert_matches!( + $iterator .next(), + Some((chunk_index, ItemPosition(chunk_identifier, item_index), & $item )) => { + // Ensure the chunk index (from the enumeration) is correct. + assert_eq!(chunk_index, $chunk_index); + // Ensure the chunk identifier is the same for all items in this chunk. + assert_eq!(chunk_identifier, _expected_chunk_identifier); + // Ensure the item has the expected position. 
+ assert_eq!(item_index, $item_index); + } + ); + $item_index += 1; + )* + $item_index = 0; + $chunk_index += 1; + } + ) + }; + + ( @_ [ $iterator:ident, $chunk_index:ident, $item_index:ident ] {} { $( $accumulator:tt )* } ) => { + { + let mut $chunk_index = 0; + let mut $item_index = 0; + $( $accumulator )* + } + }; + + ( $linked_chunk:expr, $( $all:tt )* ) => { + assert_items_eq!( + @_ + [ iterator, _chunk_index, _item_index ] + { $( $all )* } + { + let mut iterator = $linked_chunk + .chunks_from(ChunkIdentifierGenerator::FIRST_IDENTIFIER) + .unwrap() + .enumerate() + .filter_map(|(chunk_index, chunk)| match &chunk.content { + ChunkContent::Gap => None, + ChunkContent::Items(items) => { + Some(items.iter().enumerate().map(move |(item_index, item)| { + (chunk_index, ItemPosition(chunk.identifier(), item_index), item) + })) + } + }) + .flatten() + .peekable(); + } + ) + } + } + + #[test] + fn test_chunk_identifier_generator() { + let generator = ChunkIdentifierGenerator::new_from_scratch(); + + assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(1))); + assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(2))); + assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(3))); + assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(4))); + + let generator = + ChunkIdentifierGenerator::new_from_previous_chunk_identifier(ChunkIdentifier(42)); + + assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(43))); + assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(44))); + assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(45))); + assert_eq!(generator.generate_next(), Ok(ChunkIdentifier(46))); + } + + #[test] + fn test_empty() { + let items = LinkedChunk::::new(); + + assert_eq!(items.len(), 0); + + // This test also ensures that `Drop` for `LinkedChunk` works when + // there is only one chunk. 
+ } + + #[test] + fn test_push_items() { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a']); + + assert_items_eq!(linked_chunk, ['a']); + + linked_chunk.push_items_back(['b', 'c']); + assert_items_eq!(linked_chunk, ['a', 'b', 'c']); + + linked_chunk.push_items_back(['d', 'e']); + assert_items_eq!(linked_chunk, ['a', 'b', 'c'] ['d', 'e']); + + linked_chunk.push_items_back(['f', 'g', 'h', 'i', 'j']); + assert_items_eq!(linked_chunk, ['a', 'b', 'c'] ['d', 'e', 'f'] ['g', 'h', 'i'] ['j']); + + assert_eq!(linked_chunk.len(), 10); + } + + #[test] + fn test_push_gap() { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a']); + assert_items_eq!(linked_chunk, ['a']); + + linked_chunk.push_gap_back(); + assert_items_eq!(linked_chunk, ['a'] [-]); + + linked_chunk.push_items_back(['b', 'c', 'd', 'e']); + assert_items_eq!(linked_chunk, ['a'] [-] ['b', 'c', 'd'] ['e']); + + linked_chunk.push_gap_back(); + linked_chunk.push_gap_back(); // why not + assert_items_eq!(linked_chunk, ['a'] [-] ['b', 'c', 'd'] ['e'] [-] [-]); + + linked_chunk.push_items_back(['f', 'g', 'h', 'i']); + assert_items_eq!(linked_chunk, ['a'] [-] ['b', 'c', 'd'] ['e'] [-] [-] ['f', 'g', 'h'] ['i']); + + assert_eq!(linked_chunk.len(), 9); + } + + #[test] + fn test_identifiers_and_positions() { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b', 'c', 'd', 'e', 'f']); + linked_chunk.push_gap_back(); + linked_chunk.push_items_back(['g', 'h', 'i', 'j']); + assert_items_eq!(linked_chunk, ['a', 'b', 'c'] ['d', 'e', 'f'] [-] ['g', 'h', 'i'] ['j']); + + assert_eq!(linked_chunk.chunk_identifier(Chunk::is_gap), Some(ChunkIdentifier(2))); + assert_eq!( + linked_chunk.item_position(|item| *item == 'e'), + Some(ItemPosition(ChunkIdentifier(1), 1)) + ); + } + + #[test] + fn test_rchunks() { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b']); + linked_chunk.push_gap_back(); + linked_chunk.push_items_back(['c', 'd', 'e']); + + let mut iterator = linked_chunk.rchunks(); + + assert_matches!( + iterator.next(), + Some(Chunk { identifier: ChunkIdentifier(3), content: ChunkContent::Items(items), .. }) => { + assert_eq!(items, &['e']); + } + ); + assert_matches!( + iterator.next(), + Some(Chunk { identifier: ChunkIdentifier(2), content: ChunkContent::Items(items), .. }) => { + assert_eq!(items, &['c', 'd']); + } + ); + assert_matches!( + iterator.next(), + Some(Chunk { identifier: ChunkIdentifier(1), content: ChunkContent::Gap, .. }) + ); + assert_matches!( + iterator.next(), + Some(Chunk { identifier: ChunkIdentifier(0), content: ChunkContent::Items(items), .. }) => { + assert_eq!(items, &['a', 'b']); + } + ); + assert_matches!(iterator.next(), None); + } + + #[test] + fn test_rchunks_from() -> Result<(), LinkedChunkError> { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b']); + linked_chunk.push_gap_back(); + linked_chunk.push_items_back(['c', 'd', 'e']); + + let mut iterator = linked_chunk.rchunks_from( + linked_chunk.item_position(|item| *item == 'c').unwrap().chunk_identifier(), + )?; + + assert_matches!( + iterator.next(), + Some(Chunk { identifier: ChunkIdentifier(2), content: ChunkContent::Items(items), .. }) => { + assert_eq!(items, &['c', 'd']); + } + ); + assert_matches!( + iterator.next(), + Some(Chunk { identifier: ChunkIdentifier(1), content: ChunkContent::Gap, .. 
}) + ); + assert_matches!( + iterator.next(), + Some(Chunk { identifier: ChunkIdentifier(0), content: ChunkContent::Items(items), .. }) => { + assert_eq!(items, &['a', 'b']); + } + ); + assert_matches!(iterator.next(), None); + + Ok(()) + } + + #[test] + fn test_chunks_from() -> Result<(), LinkedChunkError> { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b']); + linked_chunk.push_gap_back(); + linked_chunk.push_items_back(['c', 'd', 'e']); + + let mut iterator = linked_chunk.chunks_from( + linked_chunk.item_position(|item| *item == 'c').unwrap().chunk_identifier(), + )?; + + assert_matches!( + iterator.next(), + Some(Chunk { identifier: ChunkIdentifier(2), content: ChunkContent::Items(items), .. }) => { + assert_eq!(items, &['c', 'd']); + } + ); + assert_matches!( + iterator.next(), + Some(Chunk { identifier: ChunkIdentifier(3), content: ChunkContent::Items(items), .. }) => { + assert_eq!(items, &['e']); + } + ); + assert_matches!(iterator.next(), None); + + Ok(()) + } + + #[test] + fn test_ritems() { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b']); + linked_chunk.push_gap_back(); + linked_chunk.push_items_back(['c', 'd', 'e']); + + let mut iterator = linked_chunk.ritems(); + + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(3), 0), 'e'))); + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(2), 0), 'd'))); + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(2), 1), 'c'))); + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(0), 0), 'b'))); + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(0), 1), 'a'))); + assert_matches!(iterator.next(), None); + } + + #[test] + fn test_ritems_from() -> Result<(), LinkedChunkError> { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b']); + linked_chunk.push_gap_back(); + linked_chunk.push_items_back(['c', 'd', 'e']); + + let mut iterator = + linked_chunk.ritems_from(linked_chunk.item_position(|item| *item == 'c').unwrap())?; + + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(2), 1), 'c'))); + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(0), 0), 'b'))); + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(0), 1), 'a'))); + assert_matches!(iterator.next(), None); + + Ok(()) + } + + #[test] + fn test_items_from() -> Result<(), LinkedChunkError> { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b']); + linked_chunk.push_gap_back(); + linked_chunk.push_items_back(['c', 'd', 'e']); + + let mut iterator = + linked_chunk.items_from(linked_chunk.item_position(|item| *item == 'c').unwrap())?; + + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(2), 1), 'c'))); + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(2), 0), 'd'))); + assert_matches!(iterator.next(), Some((ItemPosition(ChunkIdentifier(3), 0), 'e'))); + assert_matches!(iterator.next(), None); + + Ok(()) + } + + #[test] + fn test_insert_items_at() -> Result<(), LinkedChunkError> { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b', 'c', 'd', 'e', 'f']); + assert_items_eq!(linked_chunk, ['a', 'b', 'c'] ['d', 'e', 'f']); + + // Insert inside the last chunk. + { + let position_of_e = linked_chunk.item_position(|item| *item == 'e').unwrap(); + + // Insert 4 elements, so that it overflows the chunk capacity. 
It's important to + // see whether chunks are correctly updated and linked. + linked_chunk.insert_items_at(['w', 'x', 'y', 'z'], position_of_e)?; + + assert_items_eq!( + linked_chunk, + ['a', 'b', 'c'] ['d', 'w', 'x'] ['y', 'z', 'e'] ['f'] + ); + assert_eq!(linked_chunk.len(), 10); + } + + // Insert inside the first chunk. + { + let position_of_a = linked_chunk.item_position(|item| *item == 'a').unwrap(); + linked_chunk.insert_items_at(['l', 'm', 'n', 'o'], position_of_a)?; + + assert_items_eq!( + linked_chunk, + ['l', 'm', 'n'] ['o', 'a', 'b'] ['c'] ['d', 'w', 'x'] ['y', 'z', 'e'] ['f'] + ); + assert_eq!(linked_chunk.len(), 14); + } + + // Insert inside a middle chunk. + { + let position_of_c = linked_chunk.item_position(|item| *item == 'c').unwrap(); + linked_chunk.insert_items_at(['r', 's'], position_of_c)?; + + assert_items_eq!( + linked_chunk, + ['l', 'm', 'n'] ['o', 'a', 'b'] ['r', 's', 'c'] ['d', 'w', 'x'] ['y', 'z', 'e'] ['f'] + ); + assert_eq!(linked_chunk.len(), 16); + } + + // Insert in a chunk that does not exist. + { + assert_matches!( + linked_chunk.insert_items_at(['u', 'v'], ItemPosition(ChunkIdentifier(128), 0)), + Err(LinkedChunkError::InvalidChunkIdentifier { identifier: ChunkIdentifier(128) }) + ); + } + + // Insert in a chunk that exists, but at an item that does not exist. + { + assert_matches!( + linked_chunk.insert_items_at(['u', 'v'], ItemPosition(ChunkIdentifier(0), 128)), + Err(LinkedChunkError::InvalidItemIndex { index: 128 }) + ); + } + + // Insert in a gap. + { + // Add a gap to test the error. + linked_chunk.push_gap_back(); + assert_items_eq!( + linked_chunk, + ['l', 'm', 'n'] ['o', 'a', 'b'] ['r', 's', 'c'] ['d', 'w', 'x'] ['y', 'z', 'e'] ['f'] [-] + ); + + assert_matches!( + linked_chunk.insert_items_at(['u', 'v'], ItemPosition(ChunkIdentifier(6), 0),), + Err(LinkedChunkError::ChunkIsAGap { identifier: ChunkIdentifier(6) }) + ); + } + + assert_eq!(linked_chunk.len(), 16); + + Ok(()) + } + + #[test] + fn test_insert_gap_at() -> Result<(), LinkedChunkError> { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b', 'c', 'd', 'e', 'f']); + assert_items_eq!(linked_chunk, ['a', 'b', 'c'] ['d', 'e', 'f']); + + // Insert in the middle of a chunk. + { + let position_of_b = linked_chunk.item_position(|item| *item == 'b').unwrap(); + linked_chunk.insert_gap_at(position_of_b)?; + + assert_items_eq!(linked_chunk, ['a'] [-] ['b', 'c'] ['d', 'e', 'f']); + } + + // Insert at the beginning of a chunk. + { + let position_of_a = linked_chunk.item_position(|item| *item == 'a').unwrap(); + linked_chunk.insert_gap_at(position_of_a)?; + + assert_items_eq!(linked_chunk, [] [-] ['a'] [-] ['b', 'c'] ['d', 'e', 'f']); + } + + // Insert in a chunk that does not exist. + { + assert_matches!( + linked_chunk.insert_items_at(['u', 'v'], ItemPosition(ChunkIdentifier(128), 0)), + Err(LinkedChunkError::InvalidChunkIdentifier { identifier: ChunkIdentifier(128) }) + ); + } + + // Insert in a chunk that exists, but at an item that does not exist. + { + assert_matches!( + linked_chunk.insert_items_at(['u', 'v'], ItemPosition(ChunkIdentifier(0), 128)), + Err(LinkedChunkError::InvalidItemIndex { index: 128 }) + ); + } + + // Insert in an existing gap. + { + // It is impossible to get the item position inside a gap. It's only possible if + // the item position is crafted by hand or is outdated. 
+ let position_of_a_gap = ItemPosition(ChunkIdentifier(4), 0); + assert_matches!( + linked_chunk.insert_gap_at(position_of_a_gap), + Err(LinkedChunkError::ChunkIsAGap { identifier: ChunkIdentifier(4) }) + ); + } + + assert_eq!(linked_chunk.len(), 6); + + Ok(()) + } + + #[test] + fn test_replace_gap_at() -> Result<(), LinkedChunkError> { + let mut linked_chunk = LinkedChunk::::new(); + linked_chunk.push_items_back(['a', 'b', 'c']); + linked_chunk.push_gap_back(); + linked_chunk.push_items_back(['l', 'm', 'n']); + assert_items_eq!(linked_chunk, ['a', 'b', 'c'] [-] ['l', 'm', 'n']); + + // Replace a gap in the middle of the linked chunk. + { + let gap_identifier = linked_chunk.chunk_identifier(Chunk::is_gap).unwrap(); + assert_eq!(gap_identifier, ChunkIdentifier(1)); + + linked_chunk.replace_gap_at(['d', 'e', 'f', 'g', 'h'], gap_identifier)?; + assert_items_eq!( + linked_chunk, + ['a', 'b', 'c'] ['d', 'e', 'f'] ['g', 'h'] ['l', 'm', 'n'] + ); + } + + // Replace a gap at the end of the linked chunk. + { + linked_chunk.push_gap_back(); + assert_items_eq!( + linked_chunk, + ['a', 'b', 'c'] ['d', 'e', 'f'] ['g', 'h'] ['l', 'm', 'n'] [-] + ); + + let gap_identifier = linked_chunk.chunk_identifier(Chunk::is_gap).unwrap(); + assert_eq!(gap_identifier, ChunkIdentifier(5)); + + linked_chunk.replace_gap_at(['w', 'x', 'y', 'z'], gap_identifier)?; + assert_items_eq!( + linked_chunk, + ['a', 'b', 'c'] ['d', 'e', 'f'] ['g', 'h'] ['l', 'm', 'n'] ['w', 'x', 'y'] ['z'] + ); + } + + assert_eq!(linked_chunk.len(), 15); + + Ok(()) + } +} diff --git a/crates/matrix-sdk/src/event_cache/mod.rs b/crates/matrix-sdk/src/event_cache/mod.rs new file mode 100644 index 00000000000..8951c5672c0 --- /dev/null +++ b/crates/matrix-sdk/src/event_cache/mod.rs @@ -0,0 +1,1006 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The event cache is an abstraction layer, sitting between the Rust SDK and a +//! final client, that acts as a global observer of all the rooms, gathering and +//! inferring some extra useful information about each room. In particular, this +//! doesn't require subscribing to a specific room to get access to this +//! information. +//! +//! It's intended to be fast, robust and easy to maintain. +//! +//! See the [github issue](https://github.com/matrix-org/matrix-rust-sdk/issues/3058) for more details about the historical reasons that led us to start writing this. +//! +//! Most of it is still a work-in-progress, as of 2024-01-22. +//! +//! The desired set of features it may eventually implement is the following: +//! +//! - [ ] compute proper unread room counts, and use backpagination to get +//! missing messages/notifications/mentions, if needs be. +//! - [ ] expose that information with a new data structure similar to the +//! `RoomInfo`, and that may update a `RoomListService`. +//! - [ ] provide read receipts for each message. +//! - [x] backwards pagination +//! - [ ] forward pagination +//! 
- [ ] reconcile results with cached timelines. +//! - [ ] retry decryption upon receiving new keys (from an encryption sync +//! service or from a key backup). +//! - [ ] expose the latest event for a given room. +//! - [ ] caching of events on-disk. + +#![forbid(missing_docs)] + +use std::{ + collections::BTreeMap, + fmt::Debug, + sync::{Arc, OnceLock, Weak}, + time::Duration, +}; + +use matrix_sdk_base::{ + deserialized_responses::{AmbiguityChange, SyncTimelineEvent, TimelineEvent}, + sync::{JoinedRoomUpdate, LeftRoomUpdate, RoomUpdates, Timeline}, +}; +use matrix_sdk_common::executor::{spawn, JoinHandle}; +use ruma::{ + assign, + events::{AnyRoomAccountDataEvent, AnySyncEphemeralRoomEvent}, + serde::Raw, + OwnedEventId, OwnedRoomId, RoomId, +}; +use tokio::{ + sync::{ + broadcast::{error::RecvError, Receiver, Sender}, + Mutex, Notify, RwLock, + }, + time::timeout, +}; +use tracing::{error, instrument, trace, warn}; + +use self::store::{EventCacheStore, MemoryStore, TimelineEntry}; +use crate::{ + client::ClientInner, event_cache::store::PaginationToken, room::MessagesOptions, Client, Room, +}; + +mod linked_chunk; +mod store; + +/// An error observed in the [`EventCache`]. +#[derive(thiserror::Error, Debug)] +pub enum EventCacheError { + /// The [`EventCache`] instance hasn't been initialized with + /// [`EventCache::subscribe`] + #[error( + "The EventCache hasn't subscribed to sync responses yet, call `EventCache::subscribe()`" + )] + NotSubscribedYet, + + /// The room hasn't been found in the client. + /// + /// Technically, it's possible to request a `RoomEventCache` for a room that + /// is not known to the client, leading to this error. + #[error("Room {0} hasn't been found in the Client.")] + RoomNotFound(OwnedRoomId), + + /// The given back-pagination token is unknown to the event cache. + #[error("The given back-pagination token is unknown to the event cache.")] + UnknownBackpaginationToken, + + /// The [`EventCache`] owns a weak reference to the [`Client`] it pertains + /// to. It's possible this weak reference points to nothing anymore, at + /// times where we try to use the client. + #[error("The owning client of the event cache has been dropped.")] + ClientDropped, + + /// Another error caused by the SDK happened somewhere, and we report it to + /// the caller. + #[error("SDK error: {0}")] + SdkError(#[source] crate::Error), +} + +/// A result using the [`EventCacheError`]. +pub type Result = std::result::Result; + +/// Hold handles to the tasks spawn by a [`RoomEventCache`]. +pub struct EventCacheDropHandles { + listen_updates_task: JoinHandle<()>, +} + +impl Debug for EventCacheDropHandles { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("EventCacheDropHandles").finish_non_exhaustive() + } +} + +impl Drop for EventCacheDropHandles { + fn drop(&mut self) { + self.listen_updates_task.abort(); + } +} + +/// An event cache, providing lots of useful functionality for clients. +/// +/// Cloning is shallow, and thus is cheap to do. +/// +/// See also the module-level comment. +#[derive(Clone)] +pub struct EventCache { + /// Reference to the inner cache. + inner: Arc, +} + +impl Debug for EventCache { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("EventCache").finish_non_exhaustive() + } +} + +impl EventCache { + /// Create a new [`EventCache`] for the given client. 
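Because the cache acts as a global observer, a client opts in once and every room benefits. A rough sketch of that wiring (not part of the diff), relying only on the public `Client::event_cache()` accessor added earlier in this patch; the `enable_event_cache` helper is a hypothetical name:

```rust
use matrix_sdk::Client;

// Editor's sketch, not part of the patch. Error handling is elided:
// `subscribe()` can only fail if the owning client has been dropped.
fn enable_event_cache(client: &Client) {
    // Start listening to every room update coming from sync. Re-running this
    // is cheap and has no effect once the listen task already exists.
    client.event_cache().subscribe().expect("the client is still alive");
}
```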
+ pub(crate) fn new(client: &Arc) -> Self { + let store = Arc::new(MemoryStore::new()); + let inner = Arc::new(EventCacheInner { + client: Arc::downgrade(client), + by_room: Default::default(), + store: Arc::new(Mutex::new(store)), + drop_handles: Default::default(), + }); + + Self { inner } + } + + /// Starts subscribing the [`EventCache`] to sync responses, if not done + /// before. + /// + /// Re-running this has no effect if we already subscribed before, and is + /// cheap. + pub fn subscribe(&self) -> Result<()> { + let client = self.inner.client()?; + + let _ = self.inner.drop_handles.get_or_init(|| { + // Spawn the task that will listen to all the room updates at once. + let room_updates_feed = client.subscribe_to_all_room_updates(); + let listen_updates_task = + spawn(Self::listen_task(self.inner.clone(), room_updates_feed)); + + Arc::new(EventCacheDropHandles { listen_updates_task }) + }); + + Ok(()) + } + + async fn listen_task( + inner: Arc, + mut room_updates_feed: Receiver, + ) { + trace!("Spawning the listen task"); + loop { + match room_updates_feed.recv().await { + Ok(updates) => { + if let Err(err) = inner.handle_room_updates(updates).await { + match err { + EventCacheError::ClientDropped => { + // The client has dropped, exit the listen task. + break; + } + err => { + error!("Error when handling room updates: {err}"); + } + } + } + } + + Err(RecvError::Lagged(_)) => { + // Forget everything we know; we could have missed events, and we have + // no way to reconcile at the moment! + // TODO: implement Smart Matching™, + let store = inner.store.lock().await; + let mut by_room = inner.by_room.write().await; + for room_id in by_room.keys() { + if let Err(err) = store.clear_room(room_id).await { + error!("unable to clear room after room updates lag: {err}"); + } + } + by_room.clear(); + } + + Err(RecvError::Closed) => { + // The sender has shut down, exit. + break; + } + } + } + } + + /// Return a room-specific view over the [`EventCache`]. + pub(crate) async fn for_room( + &self, + room_id: &RoomId, + ) -> Result<(Option, Arc)> { + let Some(drop_handles) = self.inner.drop_handles.get().cloned() else { + return Err(EventCacheError::NotSubscribedYet); + }; + + let room = self.inner.for_room(room_id).await?; + + Ok((room, drop_handles)) + } + + /// Add an initial set of events to the event cache, reloaded from a cache. + /// + /// TODO: temporary for API compat, as the event cache should take care of + /// its own store. + #[instrument(skip(self, events))] + pub async fn add_initial_events( + &self, + room_id: &RoomId, + events: Vec, + prev_batch: Option, + ) -> Result<()> { + let Some(room_cache) = self.inner.for_room(room_id).await? else { + warn!("unknown room, skipping"); + return Ok(()); + }; + + // We could have received events during a previous sync; remove them all, since + // we can't know where to insert the "initial events" with respect to + // them. + let store = self.inner.store.lock().await; + + store.clear_room(room_id).await?; + let _ = room_cache.inner.sender.send(RoomEventCacheUpdate::Clear); + + room_cache + .inner + .append_events( + &**store, + events, + prev_batch, + Default::default(), + Default::default(), + Default::default(), + ) + .await?; + + Ok(()) + } +} + +struct EventCacheInner { + /// A weak reference to the inner client, useful when trying to get a handle + /// on the owning client. + client: Weak, + + /// Lazily-filled cache of live [`RoomEventCache`], once per room. + by_room: RwLock>, + + /// Backend used for storage. 
+ /// + /// [`Mutex`] is “fair”, as it is implemented as a FIFO. It is important to + /// ensure that multiple updates will be applied in the correct order, which + /// is enforced by taking the store lock when handling an update. + /// + /// TODO: replace with a cross-process lock + store: Arc>>, + + /// Handles to keep alive the task listening to updates. + drop_handles: OnceLock>, +} + +impl EventCacheInner { + fn client(&self) -> Result { + Ok(Client { inner: self.client.upgrade().ok_or(EventCacheError::ClientDropped)? }) + } + + /// Handles a single set of room updates at once. + #[instrument(skip(self, updates))] + async fn handle_room_updates(&self, updates: RoomUpdates) -> Result<()> { + // First, take the lock that indicates we're processing updates, to avoid + // handling multiple updates concurrently. + let store = self.store.lock().await; + + // Left rooms. + for (room_id, left_room_update) in updates.leave { + let Some(room) = self.for_room(&room_id).await? else { + warn!(%room_id, "missing left room"); + continue; + }; + + if let Err(err) = room.inner.handle_left_room_update(&**store, left_room_update).await { + // Non-fatal error, try to continue to the next room. + error!("handling left room update: {err}"); + } + } + + // Joined rooms. + for (room_id, joined_room_update) in updates.join { + let Some(room) = self.for_room(&room_id).await? else { + warn!(%room_id, "missing joined room"); + continue; + }; + + if let Err(err) = + room.inner.handle_joined_room_update(&**store, joined_room_update).await + { + // Non-fatal error, try to continue to the next room. + error!("handling joined room update: {err}"); + } + } + + // Invited rooms. + // TODO: we don't anything with `updates.invite` at this point. + + Ok(()) + } + + /// Return a room-specific view over the [`EventCache`]. + /// + /// It may not be found, if the room isn't known to the client, in which + /// case it'll return None. + async fn for_room(&self, room_id: &RoomId) -> Result> { + // Fast path: the entry exists; let's acquire a read lock, it's cheaper than a + // write lock. + let by_room_guard = self.by_room.read().await; + + match by_room_guard.get(room_id) { + Some(room) => Ok(Some(room.clone())), + + None => { + // Slow-path: the entry doesn't exist; let's acquire a write lock. + drop(by_room_guard); + let mut by_room_guard = self.by_room.write().await; + + // In the meanwhile, some other caller might have obtained write access and done + // the same, so check for existence again. + if let Some(room) = by_room_guard.get(room_id) { + return Ok(Some(room.clone())); + } + + let Some(room) = self.client()?.get_room(room_id) else { + return Ok(None); + }; + + let room_event_cache = RoomEventCache::new(room, self.store.clone()); + + by_room_guard.insert(room_id.to_owned(), room_event_cache.clone()); + + Ok(Some(room_event_cache)) + } + } + } +} + +/// A subset of an event cache, for a room. +/// +/// Cloning is shallow, and thus is cheap to do. +#[derive(Clone)] +pub struct RoomEventCache { + inner: Arc, +} + +impl Debug for RoomEventCache { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RoomEventCache").finish_non_exhaustive() + } +} + +impl RoomEventCache { + /// Create a new [`RoomEventCache`] using the given room and store. + fn new(room: Room, store: Arc>>) -> Self { + Self { inner: Arc::new(RoomEventCacheInner::new(room, store)) } + } + + /// Subscribe to room updates for this room, after getting the initial list + /// of events. 
XXX: Could/should it use some kind of `Observable` + /// instead? Or not something async, like explicit handlers as our event + /// handlers? + pub async fn subscribe( + &self, + ) -> Result<(Vec, Receiver)> { + let store = self.inner.store.lock().await; + + Ok((store.room_events(self.inner.room.room_id()).await?, self.inner.sender.subscribe())) + } + + /// Returns the oldest back-pagination token, that is, the one closest to + /// the beginning of the timeline as we know it. + /// + /// Optionally, wait at most for the given duration for a back-pagination + /// token to be returned by a sync. + pub async fn oldest_backpagination_token( + &self, + max_wait: Option, + ) -> Result> { + self.inner.oldest_backpagination_token(max_wait).await + } + + /// Back-paginate with the given token, if provided. + /// + /// If no token has been provided, it will back-paginate from the end of the + /// room. + /// + /// If a token has been provided, but it was unknown to the event cache + /// (i.e. it's not associated to any gap in the timeline stored by the + /// event cache), then an error result will be returned. + pub async fn backpaginate_with_token( + &self, + batch_size: u16, + token: Option, + ) -> Result { + self.inner.backpaginate_with_token(batch_size, token).await + } +} + +/// The (non-clonable) details of the `RoomEventCache`. +struct RoomEventCacheInner { + /// Sender part for subscribers to this room. + sender: Sender, + + /// Backend used for storage, shared with the parent [`EventCacheInner`]. + /// + /// See comment there. + store: Arc>>, + + /// The Client [`Room`] this event cache pertains to. + room: Room, + + /// A notifier that we received a new pagination token. + pagination_token_notifier: Notify, + + /// A lock that ensures we don't run multiple pagination queries at the same + /// time. + pagination_lock: Mutex<()>, +} + +impl RoomEventCacheInner { + /// Creates a new cache for a room, and subscribes to room updates, so as + /// to handle new timeline events. + fn new(room: Room, store: Arc>>) -> Self { + let sender = Sender::new(32); + Self { + room, + store, + sender, + pagination_lock: Default::default(), + pagination_token_notifier: Default::default(), + } + } + + async fn handle_joined_room_update( + &self, + store: &dyn EventCacheStore, + updates: JoinedRoomUpdate, + ) -> Result<()> { + self.handle_timeline( + store, + updates.timeline, + updates.ephemeral.clone(), + updates.account_data, + updates.ambiguity_changes, + ) + .await?; + Ok(()) + } + + async fn handle_timeline( + &self, + store: &dyn EventCacheStore, + timeline: Timeline, + ephemeral: Vec>, + account_data: Vec>, + ambiguity_changes: BTreeMap, + ) -> Result<()> { + if timeline.limited { + // Ideally we'd try to reconcile existing events against those received in the + // timeline, but we're not there yet. In the meanwhile, clear the + // items from the room. TODO: implement Smart Matching™. + trace!("limited timeline, clearing all previous events"); + + // Clear internal state (events, pagination tokens, etc.). + store.clear_room(self.room.room_id()).await?; + + // Propagate to observers. + let _ = self.sender.send(RoomEventCacheUpdate::Clear); + } + + // Add all the events to the backend. 
+ trace!("adding new events"); + self.append_events( + store, + timeline.events, + timeline.prev_batch, + account_data, + ephemeral, + ambiguity_changes, + ) + .await?; + + Ok(()) + } + + async fn handle_left_room_update( + &self, + store: &dyn EventCacheStore, + updates: LeftRoomUpdate, + ) -> Result<()> { + self.handle_timeline( + store, + updates.timeline, + Vec::new(), + Vec::new(), + updates.ambiguity_changes, + ) + .await?; + Ok(()) + } + + /// Append a set of events to the room cache and storage, notifying + /// observers. + async fn append_events( + &self, + store: &dyn EventCacheStore, + events: Vec, + prev_batch: Option, + account_data: Vec>, + ephemeral: Vec>, + ambiguity_changes: BTreeMap, + ) -> Result<()> { + if events.is_empty() + && prev_batch.is_none() + && ephemeral.is_empty() + && account_data.is_empty() + && ambiguity_changes.is_empty() + { + return Ok(()); + } + + let room_id = self.room.room_id(); + + // Add the previous back-pagination token (if present), followed by the timeline + // events themselves. + let gap_with_token = prev_batch + .clone() + .map(|val| TimelineEntry::Gap { prev_token: PaginationToken(val) }) + .into_iter(); + + store + .append_room_entries( + room_id, + gap_with_token.chain(events.iter().cloned().map(TimelineEntry::Event)).collect(), + ) + .await?; + + if prev_batch.is_some() { + self.pagination_token_notifier.notify_one(); + } + + let _ = self.sender.send(RoomEventCacheUpdate::Append { + events, + account_data, + ephemeral, + ambiguity_changes, + }); + + Ok(()) + } + + /// Run a single back-pagination `/messages` request. + /// + /// This will only run one request; since a backpagination may need to + /// continue, it's preferable to use [`Self::backpaginate_until`]. + /// + /// Returns the number of messages received in this chunk. + #[instrument(skip(self))] + async fn backpaginate_with_token( + &self, + batch_size: u16, + token: Option, + ) -> Result { + // Make sure there's at most one back-pagination request. + let _guard = self.pagination_lock.lock().await; + + if let Some(token) = token.as_ref() { + let store = self.store.lock().await; + if !store.contains_gap(self.room.room_id(), token).await? { + return Err(EventCacheError::UnknownBackpaginationToken); + } + } + + let messages = self + .room + .messages(assign!(MessagesOptions::backward(), { + from: token.as_ref().map(|token| token.0.clone()), + limit: batch_size.into() + })) + .await + .map_err(EventCacheError::SdkError)?; + + // Would we want to backpaginate again, we'd start from the `end` token as the + // next `from` token. + + let prev_token = messages.end; + + // If this token is missing, then we've reached the end of the timeline. + let reached_start = prev_token.is_none(); + + // Note: The chunk could be empty. + // + // If there's any event, they are presented in reverse order (i.e. the first one + // should be prepended first). + let events = messages.chunk; + + // Prepend the previous token (if any) at the beginning of the timeline, + // followed by the events received in the response (in reverse order). + let new_gap = prev_token + .map(|token| TimelineEntry::Gap { prev_token: PaginationToken(token) }) + .into_iter(); + + // For storage, reverse events to store them in the normal (non-reversed order). + // + // It's fine to convert from `TimelineEvent` (i.e. that has a room id) to + // `SyncTimelineEvent` (i.e. that doesn't have it), because those events are + // always tied to a room in storage anyways. 
+ let new_events = events.iter().rev().map(|ev| TimelineEntry::Event(ev.clone().into())); + + let replaced = self + .store + .lock() + .await + .replace_gap(self.room.room_id(), token.as_ref(), new_gap.chain(new_events).collect()) + .await?; + + if !replaced { + // The previous token disappeared! + // This can happen if we got a limited timeline and lost track of our pagination + // token, because the whole timeline has been reset. + // + // TODO: With smarter reconciliation, this might get away. In the meanwhile, + // early return and forget about all the events. + trace!("gap was missing, likely because we observed a gappy sync response"); + Ok(BackPaginationOutcome::UnknownBackpaginationToken) + } else { + trace!("replaced gap with new events from backpagination"); + + // TODO: implement smarter reconciliation later + //let _ = self.sender.send(RoomEventCacheUpdate::Prepend { events }); + + Ok(BackPaginationOutcome::Success { events, reached_start }) + } + } + + /// Returns the oldest back-pagination token, that is, the one closest to + /// the start of the timeline as we know it. + /// + /// Optionally, wait at most for the given duration for a back-pagination + /// token to be returned by a sync. + async fn oldest_backpagination_token( + &self, + max_wait: Option, + ) -> Result> { + // Optimistically try to return the backpagination token immediately. + if let Some(token) = + self.store.lock().await.oldest_backpagination_token(self.room.room_id()).await? + { + return Ok(Some(token)); + } + + let Some(max_wait) = max_wait else { + // We had no token and no time to wait, so... no tokens. + return Ok(None); + }; + + // Otherwise wait for a notification that we received a token. + // Timeouts are fine, per this function's contract. + let _ = timeout(max_wait, self.pagination_token_notifier.notified()).await; + + self.store.lock().await.oldest_backpagination_token(self.room.room_id()).await + } +} + +/// The result of a single back-pagination request. +#[derive(Debug)] +pub enum BackPaginationOutcome { + /// The back-pagination succeeded, and new events have been found. + Success { + /// Did the back-pagination reach the start of the timeline? + reached_start: bool, + + /// All the events that have been returned in the back-pagination + /// request. + /// + /// Events are presented in reverse order: the first element of the vec, + /// if present, is the most "recent" event from the chunk (or + /// technically, the last one in the topological ordering). + /// + /// Note: they're not deduplicated (TODO: smart reconciliation). + events: Vec, + }, + + /// The back-pagination token was unknown to the event cache, and the caller + /// must retry after obtaining a new back-pagination token. + UnknownBackpaginationToken, +} + +/// An update related to events happened in a room. +#[derive(Debug, Clone)] +pub enum RoomEventCacheUpdate { + /// The room has been cleared from events. + Clear, + + /// The room has new events. + Append { + /// All the new events that have been added to the room's timeline. + events: Vec, + /// XXX: this is temporary, until account data lives in the event cache + /// — or will it live there? + account_data: Vec>, + /// XXX: this is temporary, until read receipts are handled in the event + /// cache + ephemeral: Vec>, + /// Collection of ambiguity changes that room member events trigger. + /// + /// This is a map of event ID of the `m.room.member` event to the + /// details of the ambiguity change. 
+ ambiguity_changes: BTreeMap, + }, +} + +#[cfg(test)] +mod tests { + + use assert_matches2::assert_matches; + use matrix_sdk_common::executor::spawn; + use matrix_sdk_test::{async_test, sync_timeline_event}; + use ruma::room_id; + + use super::{store::TimelineEntry, EventCacheError}; + use crate::{event_cache::store::PaginationToken, test_utils::logged_in_client}; + + #[async_test] + async fn test_must_explicitly_subscribe() { + let client = logged_in_client(None).await; + + let event_cache = client.event_cache(); + + // If I create a room event subscriber for a room before subscribing the event + // cache, + let room_id = room_id!("!omelette:fromage.fr"); + let result = event_cache.for_room(room_id).await; + + // Then it fails, because one must explicitly call `.subscribe()` on the event + // cache. + assert_matches!(result, Err(EventCacheError::NotSubscribedYet)); + } + + #[async_test] + async fn test_unknown_pagination_token() { + let client = logged_in_client(None).await; + let room_id = room_id!("!galette:saucisse.bzh"); + client.base_client().get_or_create_room(room_id, matrix_sdk_base::RoomState::Joined); + + client.event_cache().subscribe().unwrap(); + + let (room_event_cache, _drop_handles) = + client.event_cache().for_room(room_id).await.unwrap(); + let room_event_cache = room_event_cache.unwrap(); + + // If I try to back-paginate with an unknown back-pagination token, + let token = PaginationToken("old".to_owned()); + + // Then I run into an error. + let res = room_event_cache.backpaginate_with_token(20, Some(token)).await; + assert_matches!(res.unwrap_err(), EventCacheError::UnknownBackpaginationToken); + } + + // Those tests require time to work, and it does not on wasm32. + #[cfg(not(target_arch = "wasm32"))] + mod time_tests { + use std::time::{Duration, Instant}; + + use tokio::time::sleep; + + use super::*; + + #[async_test] + async fn test_wait_no_pagination_token() { + let client = logged_in_client(None).await; + let room_id = room_id!("!galette:saucisse.bzh"); + client.base_client().get_or_create_room(room_id, matrix_sdk_base::RoomState::Joined); + + client.event_cache().subscribe().unwrap(); + + // When I only have events in a room, + client + .event_cache() + .inner + .store + .lock() + .await + .append_room_entries( + room_id, + vec![TimelineEntry::Event( + sync_timeline_event!({ + "sender": "b@z.h", + "type": "m.room.message", + "event_id": "$ida", + "origin_server_ts": 12344446, + "content": { "body":"yolo", "msgtype": "m.text" }, + }) + .into(), + )], + ) + .await + .unwrap(); + + let (room_event_cache, _drop_handlers) = + client.event_cache().for_room(room_id).await.unwrap(); + let room_event_cache = room_event_cache.unwrap(); + + // If I don't wait for the backpagination token, + let found = room_event_cache.oldest_backpagination_token(None).await.unwrap(); + // Then I don't find it. + assert!(found.is_none()); + + // If I wait for a back-pagination token for 0 seconds, + let before = Instant::now(); + let found = room_event_cache + .oldest_backpagination_token(Some(Duration::default())) + .await + .unwrap(); + let waited = before.elapsed(); + // then I don't get any, + assert!(found.is_none()); + // and I haven't waited long. + assert!(waited.as_secs() < 1); + + // If I wait for a back-pagination token for 1 second, + let before = Instant::now(); + let found = room_event_cache + .oldest_backpagination_token(Some(Duration::from_secs(1))) + .await + .unwrap(); + let waited = before.elapsed(); + // then I still don't get any. 
+ assert!(found.is_none()); + // and I've waited a bit. + assert!(waited.as_secs() < 2); + assert!(waited.as_secs() >= 1); + } + + #[async_test] + async fn test_wait_for_pagination_token_already_present() { + let client = logged_in_client(None).await; + let room_id = room_id!("!galette:saucisse.bzh"); + client.base_client().get_or_create_room(room_id, matrix_sdk_base::RoomState::Joined); + + client.event_cache().subscribe().unwrap(); + + let (room_event_cache, _drop_handles) = + client.event_cache().for_room(room_id).await.unwrap(); + let room_event_cache = room_event_cache.unwrap(); + + let expected_token = PaginationToken("old".to_owned()); + + // When I have events and multiple gaps, in a room, + client + .event_cache() + .inner + .store + .lock() + .await + .append_room_entries( + room_id, + vec![ + TimelineEntry::Gap { prev_token: expected_token.clone() }, + TimelineEntry::Event( + sync_timeline_event!({ + "sender": "b@z.h", + "type": "m.room.message", + "event_id": "$ida", + "origin_server_ts": 12344446, + "content": { "body":"yolo", "msgtype": "m.text" }, + }) + .into(), + ), + ], + ) + .await + .unwrap(); + + // If I don't wait for a back-pagination token, + let found = room_event_cache.oldest_backpagination_token(None).await.unwrap(); + // Then I get it. + assert_eq!(found.as_ref(), Some(&expected_token)); + + // If I wait for a back-pagination token for 0 seconds, + let before = Instant::now(); + let found = room_event_cache + .oldest_backpagination_token(Some(Duration::default())) + .await + .unwrap(); + let waited = before.elapsed(); + // then I do get one. + assert_eq!(found.as_ref(), Some(&expected_token)); + // and I haven't waited long. + assert!(waited.as_millis() < 100); + + // If I wait for a back-pagination token for 1 second, + let before = Instant::now(); + let found = room_event_cache + .oldest_backpagination_token(Some(Duration::from_secs(1))) + .await + .unwrap(); + let waited = before.elapsed(); + // then I do get one. + assert_eq!(found.as_ref(), Some(&expected_token)); + // and I haven't waited long. + assert!(waited.as_millis() < 100); + } + + #[async_test] + async fn test_wait_for_late_pagination_token() { + let client = logged_in_client(None).await; + let room_id = room_id!("!galette:saucisse.bzh"); + client.base_client().get_or_create_room(room_id, matrix_sdk_base::RoomState::Joined); + + client.event_cache().subscribe().unwrap(); + + let (room_event_cache, _drop_handles) = + client.event_cache().for_room(room_id).await.unwrap(); + let room_event_cache = room_event_cache.unwrap(); + + let expected_token = PaginationToken("old".to_owned()); + + let before = Instant::now(); + let cloned_expected_token = expected_token.clone(); + let insert_token_task = spawn(async move { + // If a backpagination token is inserted after 400 milliseconds, + sleep(Duration::from_millis(400)).await; + + client + .event_cache() + .inner + .store + .lock() + .await + .append_room_entries( + room_id, + vec![TimelineEntry::Gap { prev_token: cloned_expected_token }], + ) + .await + .unwrap(); + }); + + // Then first I don't get it (if I'm not waiting,) + let found = room_event_cache.oldest_backpagination_token(None).await.unwrap(); + assert!(found.is_none()); + + // And if I wait for the back-pagination token for 600ms, + let found = room_event_cache + .oldest_backpagination_token(Some(Duration::from_millis(600))) + .await + .unwrap(); + let waited = before.elapsed(); + + // then I do get one eventually. 
+ assert_eq!(found.as_ref(), Some(&expected_token)); + // and I have waited between ~400 and ~1000 milliseconds. + assert!(waited.as_secs() < 1); + assert!(waited.as_millis() >= 400); + + // The task succeeded. + insert_token_task.await.unwrap(); + } + } +} diff --git a/crates/matrix-sdk/src/event_cache/store.rs b/crates/matrix-sdk/src/event_cache/store.rs new file mode 100644 index 00000000000..72c26ab7181 --- /dev/null +++ b/crates/matrix-sdk/src/event_cache/store.rs @@ -0,0 +1,340 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{collections::BTreeMap, fmt, iter::once, result::Result as StdResult}; + +use async_trait::async_trait; +use matrix_sdk_common::deserialized_responses::SyncTimelineEvent; +use ruma::{OwnedRoomId, RoomId}; +use tokio::sync::RwLock; + +use super::{ + linked_chunk::{ + Chunk, ChunkIdentifier, ItemPosition, LinkedChunk, LinkedChunkError, LinkedChunkIter, + LinkedChunkIterBackward, + }, + Result, +}; + +/// A store that can remember information about the event cache. +/// +/// It really acts as a cache, in the sense that clearing the backing data +/// should not have any irremediable effect, other than providing a lesser user +/// experience. +#[async_trait] +pub trait EventCacheStore: Send + Sync { + /// Returns all the known events for the given room. + async fn room_events(&self, room: &RoomId) -> Result<Vec<SyncTimelineEvent>>; + + /// Adds all the entries to the given room's timeline. + async fn append_room_entries(&self, room: &RoomId, entries: Vec<TimelineEntry>) -> Result<()>; + + /// Returns whether the store knows about the given pagination token. + async fn contains_gap(&self, room: &RoomId, pagination_token: &PaginationToken) + -> Result<bool>; + + /// Replaces a given gap (identified by its pagination token) with the given + /// entries. + /// + /// Note: if the gap hasn't been found, then nothing happens, and the events + /// are lost. + /// + /// Returns whether the gap was found. + async fn replace_gap( + &self, + room: &RoomId, + gap_id: Option<&PaginationToken>, + entries: Vec<TimelineEntry>, + ) -> Result<bool>; + + /// Retrieve the oldest backpagination token for the given room. + async fn oldest_backpagination_token(&self, room: &RoomId) -> Result<Option<PaginationToken>>; + + /// Clear all the information tied to a given room. + /// + /// This forgets the following: + /// - events in the room + /// - pagination tokens + async fn clear_room(&self, room: &RoomId) -> Result<()>; +} + +/// A newtype wrapper for a pagination token returned by a /messages response. +#[derive(Clone, Debug, PartialEq)] +pub struct PaginationToken(pub String); + +#[derive(Clone)] +pub enum TimelineEntry { + Event(SyncTimelineEvent), + + Gap { + /// The token to use in the query, extracted from a previous "from" / + /// "end" field of a `/messages` response. + prev_token: PaginationToken, + }, +} + +/// All the information related to a room and stored in the event cache. +#[derive(Default)] +struct RoomInfo { + /// All the timeline entries per room, in sync order.
+ entries: Vec, +} + +impl RoomInfo { + fn clear(&mut self) { + self.entries.clear(); + } +} + +/// An [`EventCacheStore`] implementation that keeps all the information in +/// memory. +#[derive(Default)] +pub(crate) struct MemoryStore { + by_room: RwLock>, +} + +impl MemoryStore { + /// Create a new empty [`MemoryStore`]. + pub fn new() -> Self { + Default::default() + } +} + +#[async_trait] +impl EventCacheStore for MemoryStore { + async fn room_events(&self, room: &RoomId) -> Result> { + Ok(self + .by_room + .read() + .await + .get(room) + .map(|room_info| { + room_info + .entries + .iter() + .filter_map( + |entry| if let TimelineEntry::Event(ev) = entry { Some(ev) } else { None }, + ) + .cloned() + .collect() + }) + .unwrap_or_default()) + } + + async fn append_room_entries(&self, room: &RoomId, entries: Vec) -> Result<()> { + self.by_room.write().await.entry(room.to_owned()).or_default().entries.extend(entries); + Ok(()) + } + + async fn clear_room(&self, room: &RoomId) -> Result<()> { + // Clear the room, so as to avoid reallocations if the room is being reused. + // XXX: do we also want an actual way to *remove* a room? (for left rooms) + if let Some(room) = self.by_room.write().await.get_mut(room) { + room.clear(); + } + + Ok(()) + } + + async fn oldest_backpagination_token(&self, room: &RoomId) -> Result> { + Ok(self.by_room.read().await.get(room).and_then(|room| { + room.entries.iter().find_map(|entry| { + if let TimelineEntry::Gap { prev_token: backpagination_token } = entry { + Some(backpagination_token.clone()) + } else { + None + } + }) + })) + } + + async fn contains_gap(&self, room: &RoomId, needle: &PaginationToken) -> Result { + let mut by_room_guard = self.by_room.write().await; + let room = by_room_guard.entry(room.to_owned()).or_default(); + + Ok(room.entries.iter().any(|entry| { + if let TimelineEntry::Gap { prev_token: existing } = entry { + existing == needle + } else { + false + } + })) + } + + async fn replace_gap( + &self, + room: &RoomId, + token: Option<&PaginationToken>, + entries: Vec, + ) -> Result { + let mut by_room_guard = self.by_room.write().await; + let room = by_room_guard.entry(room.to_owned()).or_default(); + + if let Some(token) = token { + let gap_pos = room.entries.iter().enumerate().find_map(|(i, t)| { + if let TimelineEntry::Gap { prev_token: existing } = t { + if existing == token { + return Some(i); + } + } + None + }); + + if let Some(pos) = gap_pos { + room.entries.splice(pos..pos + 1, entries); + Ok(true) + } else { + Ok(false) + } + } else { + // We had no previous token: assume we can prepend the events. + room.entries.splice(0..0, entries); + Ok(true) + } + } +} + +const DEFAULT_CHUNK_CAPACITY: usize = 128; + +pub struct RoomEvents { + chunks: LinkedChunk, +} + +impl Default for RoomEvents { + fn default() -> Self { + Self::new() + } +} + +#[allow(dead_code)] +impl RoomEvents { + pub fn new() -> Self { + Self { chunks: LinkedChunk::new() } + } + + /// Return the number of events. + pub fn len(&self) -> usize { + self.chunks.len() + } + + /// Push one event after existing events. + pub fn push_event(&mut self, event: SyncTimelineEvent) { + self.push_events(once(event)) + } + + /// Push events after existing events. + /// + /// The last event in `events` is the most recent one. + pub fn push_events(&mut self, events: I) + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + self.chunks.push_items_back(events) + } + + /// Insert events at a specified position. 
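+    ///
+    /// # Examples
+    ///
+    /// A rough sketch only: it builds events with the `sync_timeline_event!`
+    /// helper from `matrix_sdk_test` (as the tests in this crate do), assumes
+    /// the `event_id()` accessor on `SyncTimelineEvent`, and leaves the exact
+    /// insertion semantics to the underlying [`LinkedChunk`].
+    ///
+    /// ```ignore
+    /// let mut room_events = RoomEvents::new();
+    ///
+    /// // Fill the linked chunk with a couple of events.
+    /// room_events.push_events([event_a.clone(), event_b]);
+    ///
+    /// // Find the position of the first event, then insert new events there.
+    /// let position = room_events
+    ///     .event_position(|ev| ev.event_id() == event_a.event_id())
+    ///     .expect("the event has just been pushed");
+    /// room_events.insert_events_at([event_c], position)?;
+    /// ```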
+ pub fn insert_events_at( + &mut self, + events: I, + position: ItemPosition, + ) -> StdResult<(), LinkedChunkError> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + self.chunks.insert_items_at(events, position) + } + + /// Insert a gap at a specified position. + pub fn insert_gap_at(&mut self, position: ItemPosition) -> StdResult<(), LinkedChunkError> { + self.chunks.insert_gap_at(position) + } + + /// Search for a chunk, and return its identifier. + pub fn chunk_identifier<'a, P>(&'a self, predicate: P) -> Option + where + P: FnMut(&'a Chunk) -> bool, + { + self.chunks.chunk_identifier(predicate) + } + + /// Search for an item, and return its position. + pub fn event_position<'a, P>(&'a self, predicate: P) -> Option + where + P: FnMut(&'a SyncTimelineEvent) -> bool, + { + self.chunks.item_position(predicate) + } + + /// Iterate over the chunks, backward. + /// + /// The most recent chunk comes first. + pub fn rchunks( + &self, + ) -> LinkedChunkIterBackward<'_, SyncTimelineEvent, DEFAULT_CHUNK_CAPACITY> { + self.chunks.rchunks() + } + + /// Iterate over the chunks, starting from `identifier`, backward. + pub fn rchunks_from( + &self, + identifier: ChunkIdentifier, + ) -> StdResult< + LinkedChunkIterBackward<'_, SyncTimelineEvent, DEFAULT_CHUNK_CAPACITY>, + LinkedChunkError, + > { + self.chunks.rchunks_from(identifier) + } + + /// Iterate over the chunks, starting from `identifier`, forward — i.e. + /// to the latest chunk. + pub fn chunks_from( + &self, + identifier: ChunkIdentifier, + ) -> StdResult, LinkedChunkError> + { + self.chunks.chunks_from(identifier) + } + + /// Iterate over the events, backward. + /// + /// The most recent event comes first. + pub fn revents(&self) -> impl Iterator { + self.chunks.ritems() + } + + /// Iterate over the events, starting from `position`, backward. + pub fn revents_from( + &self, + position: ItemPosition, + ) -> StdResult, LinkedChunkError> { + self.chunks.ritems_from(position) + } + + /// Iterate over the events, starting from `position`, forward — i.e. + /// to the latest event. 
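+    ///
+    /// # Examples
+    ///
+    /// A small sketch; `position` would typically come from
+    /// [`RoomEvents::event_position`].
+    ///
+    /// ```ignore
+    /// // Walk from the given position towards the most recent event.
+    /// for event in room_events.events_from(position)? {
+    ///     // ... deduplicate, decrypt, hand over to observers, etc.
+    /// }
+    /// ```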
+ pub fn events_from( + &self, + position: ItemPosition, + ) -> StdResult, LinkedChunkError> { + self.chunks.items_from(position) + } +} + +impl fmt::Debug for RoomEvents { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> StdResult<(), fmt::Error> { + formatter.debug_struct("RoomEvents").field("chunk", &self.chunks).finish() + } +} diff --git a/crates/matrix-sdk/src/http_client/mod.rs b/crates/matrix-sdk/src/http_client/mod.rs index 72390e42708..409ae5319c4 100644 --- a/crates/matrix-sdk/src/http_client/mod.rs +++ b/crates/matrix-sdk/src/http_client/mod.rs @@ -136,8 +136,11 @@ impl HttpClient { span.record("config", debug(config)).record("request_id", request_id); let auth_scheme = R::METADATA.authentication; - if !matches!(auth_scheme, AuthScheme::AccessToken | AuthScheme::None) { - return Err(HttpError::NotClientRequest); + match auth_scheme { + AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None => {} + AuthScheme::ServerSignatures => { + return Err(HttpError::NotClientRequest); + } } let request = diff --git a/crates/matrix-sdk/src/lib.rs b/crates/matrix-sdk/src/lib.rs index 1087f837b07..4d755e84843 100644 --- a/crates/matrix-sdk/src/lib.rs +++ b/crates/matrix-sdk/src/lib.rs @@ -39,6 +39,7 @@ mod deduplicating_handler; #[cfg(feature = "e2e-encryption")] pub mod encryption; mod error; +pub mod event_cache; pub mod event_handler; mod http_client; pub mod matrix_auth; @@ -61,7 +62,9 @@ pub mod widget; pub use account::Account; pub use authentication::{AuthApi, AuthSession, SessionTokens}; -pub use client::{Client, ClientBuildError, ClientBuilder, LoopCtrl, SessionChange}; +pub use client::{ + sanitize_server_name, Client, ClientBuildError, ClientBuilder, LoopCtrl, SessionChange, +}; #[cfg(feature = "image-proc")] pub use error::ImageError; pub use error::{ @@ -90,39 +93,3 @@ pub mod test_utils; #[cfg(test)] matrix_sdk_test::init_tracing_for_tests!(); - -/// Creates a server name from a user supplied string. The string is first -/// sanitized by removing whitespace, the http(s) scheme and any trailing -/// slashes before being parsed. 
-pub fn sanitize_server_name(s: &str) -> Result { - ServerName::parse( - s.trim().trim_start_matches("http://").trim_start_matches("https://").trim_end_matches('/'), - ) -} - -#[cfg(test)] -mod tests { - use assert_matches::assert_matches; - - use crate::sanitize_server_name; - - #[test] - fn test_sanitize_server_name() { - assert_eq!(sanitize_server_name("matrix.org").unwrap().as_str(), "matrix.org"); - assert_eq!(sanitize_server_name("https://matrix.org").unwrap().as_str(), "matrix.org"); - assert_eq!(sanitize_server_name("http://matrix.org").unwrap().as_str(), "matrix.org"); - assert_eq!( - sanitize_server_name("https://matrix.server.org").unwrap().as_str(), - "matrix.server.org" - ); - assert_eq!( - sanitize_server_name("https://matrix.server.org/").unwrap().as_str(), - "matrix.server.org" - ); - assert_eq!( - sanitize_server_name(" https://matrix.server.org// ").unwrap().as_str(), - "matrix.server.org" - ); - assert_matches!(sanitize_server_name("https://matrix.server.org/something"), Err(_)) - } -} diff --git a/crates/matrix-sdk/src/matrix_auth/mod.rs b/crates/matrix-sdk/src/matrix_auth/mod.rs index f9fafb6e295..e425d9b7723 100644 --- a/crates/matrix-sdk/src/matrix_auth/mod.rs +++ b/crates/matrix-sdk/src/matrix_auth/mod.rs @@ -871,9 +871,11 @@ impl MatrixAuth { use ruma::api::client::uiaa::{AuthData, Password}; let auth_data = match login_info { - Some(login::v3::LoginInfo::Password(p)) => { - Some(AuthData::Password(Password::new(p.identifier, p.password))) - } + Some(login::v3::LoginInfo::Password(login::v3::Password { + identifier: Some(identifier), + password, + .. + })) => Some(AuthData::Password(Password::new(identifier, password))), // Other methods can't be immediately translated to an auth. _ => None, }; diff --git a/crates/matrix-sdk/src/media.rs b/crates/matrix-sdk/src/media.rs index 5d9de6b5bbc..8516d3d953c 100644 --- a/crates/matrix-sdk/src/media.rs +++ b/crates/matrix-sdk/src/media.rs @@ -25,8 +25,6 @@ use eyeball::SharedObservable; use futures_util::future::try_join; pub use matrix_sdk_base::media::*; use mime::Mime; -#[cfg(not(target_arch = "wasm32"))] -use mime2ext; use ruma::{ api::client::media::{create_content, get_content, get_content_thumbnail}, assign, diff --git a/crates/matrix-sdk/src/room/futures.rs b/crates/matrix-sdk/src/room/futures.rs index ebe2b6f0566..b599df1d5bf 100644 --- a/crates/matrix-sdk/src/room/futures.rs +++ b/crates/matrix-sdk/src/room/futures.rs @@ -32,7 +32,7 @@ use ruma::{ serde::Raw, OwnedTransactionId, TransactionId, }; -use tracing::{debug, Instrument, Span}; +use tracing::{debug, info, Instrument, Span}; use super::Room; use crate::{ @@ -201,6 +201,10 @@ impl<'a> IntoFuture for SendRawMessageLikeEvent<'a> { ); let response = room.client.send(request, None).await?; + + tracing::Span::current().record("event_id", tracing::field::debug(&response.event_id)); + info!("Sent event in room"); + Ok(response) }; diff --git a/crates/matrix-sdk/src/room/mod.rs b/crates/matrix-sdk/src/room/mod.rs index 7e60defc768..d3bdafe93d3 100644 --- a/crates/matrix-sdk/src/room/mod.rs +++ b/crates/matrix-sdk/src/room/mod.rs @@ -1,6 +1,12 @@ //! 
High-level room API -use std::{borrow::Borrow, collections::BTreeMap, ops::Deref, time::Duration}; +use std::{ + borrow::Borrow, + collections::{BTreeMap, HashMap}, + ops::Deref, + sync::Arc, + time::Duration, +}; use eyeball::SharedObservable; use futures_core::Stream; @@ -75,9 +81,17 @@ use tokio::sync::broadcast; use tracing::{debug, info, instrument, warn}; use self::futures::{SendAttachment, SendMessageLikeEvent, SendRawMessageLikeEvent}; +pub use self::{ + member::{RoomMember, RoomMemberRole}, + messages::{Messages, MessagesOptions}, +}; +#[cfg(doc)] +use crate::event_cache::EventCache; use crate::{ attachment::AttachmentConfig, + config::RequestConfig, error::WrongRoomState, + event_cache::{self, EventCacheDropHandles, RoomEventCache}, event_handler::{EventHandler, EventHandlerDropGuard, EventHandlerHandle, SyncEvent}, media::{MediaFormat, MediaRequest}, notification_settings::{IsEncrypted, IsOneToOne, RoomNotificationMode}, @@ -92,11 +106,6 @@ mod member; mod messages; pub mod power_levels; -pub use self::{ - member::{RoomMember, RoomMemberRole}, - messages::{Messages, MessagesOptions}, -}; - /// A struct containing methods that are common for Joined, Invited and Left /// Rooms #[derive(Debug, Clone)] @@ -411,11 +420,23 @@ impl Room { .members_request_deduplicated_handler .run(self.room_id().to_owned(), async move { let request = get_member_events::v3::Request::new(self.inner.room_id().to_owned()); - let response = self.client.send(request, None).await?; + let response = self + .client + .send( + request.clone(), + // In some cases it can take longer than 30s to load: + // https://github.com/element-hq/synapse/issues/16872 + Some(RequestConfig::new().timeout(Duration::from_secs(60)).retry_limit(3)), + ) + .await?; // That's a large `Future`. Let's `Box::pin` to reduce its size on the stack. - Box::pin(self.client.base_client().receive_members(self.room_id(), &response)) - .await?; + Box::pin(self.client.base_client().receive_all_members( + self.room_id(), + &request, + &response, + )) + .await?; Ok(()) }) @@ -1107,6 +1128,29 @@ impl Room { } } + /// Forces the currently active room key, which is used to encrypt messages, + /// to be rotated. + /// + /// A new room key will be created and shared with all the room members the + /// next time a message is sent. You don't have to call this method; + /// room keys will be rotated automatically when necessary. This method is + /// still useful for debugging purposes. + /// + /// For more info please take a look at the [`encryption`] module + /// documentation. + /// + /// [`encryption`]: crate::encryption + #[cfg(feature = "e2e-encryption")] + pub async fn discard_room_key(&self) -> Result<()> { + let machine = self.client.olm_machine().await; + if let Some(machine) = machine.as_ref() { + machine.discard_room_key(self.inner.room_id()).await?; + Ok(()) + } else { + Err(Error::NoOlmMachine) + } + } + /// Ban the user with `UserId` from this room. /// /// # Arguments @@ -1404,12 +1448,13 @@ impl Room { // TODO: expose this publicly so people can pre-share a group session if // e.g. a user starts to type a message for a room. #[cfg(feature = "e2e-encryption")] - #[instrument(skip_all, fields(room_id = ?self.room_id()))] + #[instrument(skip_all, fields(room_id = ?self.room_id(), store_generation))] async fn preshare_room_key(&self) -> Result<()> { self.ensure_room_joined()?; // Take and release the lock on the store, if needs be.
- let _guard = self.client.encryption().spin_lock_store(Some(60000)).await?; + let guard = self.client.encryption().spin_lock_store(Some(60000)).await?; + tracing::Span::current().record("store_generation", guard.map(|guard| guard.generation())); self.client .locks() @@ -1432,7 +1477,7 @@ impl Room { if let Err(r) = response { let machine = self.client.olm_machine().await; if let Some(machine) = machine.as_ref() { - machine.invalidate_group_session(self.room_id()).await?; + machine.discard_room_key(self.room_id()).await?; } return Err(r); } @@ -1555,8 +1600,6 @@ impl Room { /// Run /keys/query requests for all the non-tracked users. #[cfg(feature = "e2e-encryption")] async fn query_keys_for_untracked_users(&self) -> Result<()> { - use std::collections::HashMap; - let olm = self.client.olm_machine().await; let olm = olm.as_ref().expect("Olm machine wasn't started"); @@ -1629,7 +1672,7 @@ impl Room { /// } /// # anyhow::Ok(()) }; /// ``` - #[instrument(skip_all, fields(event_type, room_id = ?self.room_id(), transaction_id, encrypted))] + #[instrument(skip_all, fields(event_type, room_id = ?self.room_id(), transaction_id, encrypted, event_id))] pub fn send_raw<'a>( &'a self, event_type: &'a str, @@ -1822,6 +1865,47 @@ impl Room { .power_levels()) } + /// Resets the room's power levels to the default values. + /// + /// [spec]: https://spec.matrix.org/v1.9/client-server-api/#mroompower_levels + pub async fn reset_power_levels(&self) -> Result<RoomPowerLevels> { + let default_power_levels = RoomPowerLevels::from(RoomPowerLevelsEventContent::new()); + let changes = RoomPowerLevelChanges::from(default_power_levels); + self.apply_power_level_changes(changes).await?; + self.room_power_levels().await + } + + /// Gets the suggested role for the user with the provided `user_id`. + /// + /// This method checks the `RoomPowerLevels` events instead of loading the + /// member list and looking for the member. + pub async fn get_suggested_user_role(&self, user_id: &UserId) -> Result<RoomMemberRole> { + let power_level = self.get_user_power_level(user_id).await?; + Ok(RoomMemberRole::suggested_role_for_power_level(power_level)) + } + + /// Gets the power level of the user with the provided `user_id`. + /// + /// This method checks the `RoomPowerLevels` events instead of loading the + /// member list and looking for the member. + pub async fn get_user_power_level(&self, user_id: &UserId) -> Result<i64> { + let event = self.room_power_levels().await?; + Ok(event.for_user(user_id).into()) + } + + /// Gets a map from `UserId` to power level, for the users whose power level + /// differs from `0`. + pub async fn users_with_power_levels(&self) -> HashMap<OwnedUserId, i64> { + let power_levels = self.room_power_levels().await.ok(); + let mut user_power_levels = HashMap::<OwnedUserId, i64>::new(); + if let Some(power_levels) = power_levels { + for (id, level) in power_levels.users.into_iter() { + user_power_levels.insert(id, level.into()); + } + } + user_power_levels + } + + /// Sets the name of this room. pub async fn set_name(&self, name: String) -> Result { self.send_state_event(RoomNameEventContent::new(name)).await @@ -2521,6 +2605,20 @@ impl Room { self.client.send(request, None).await?; Ok(()) } + + /// Returns the [`RoomEventCache`] associated with this room, assuming the + /// global [`EventCache`] has been enabled for subscription.
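+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of driving a back-pagination from a room, assuming the
+    /// global cache has already been subscribed with [`EventCache::subscribe`]:
+    ///
+    /// ```no_run
+    /// # async {
+    /// use std::time::Duration;
+    ///
+    /// use matrix_sdk::event_cache::BackPaginationOutcome;
+    ///
+    /// # let room: matrix_sdk::Room = todo!();
+    /// let (room_event_cache, _drop_handles) = room.event_cache().await?;
+    ///
+    /// // Events already known to the cache, plus a receiver for future updates.
+    /// let (initial_events, updates) = room_event_cache.subscribe().await?;
+    ///
+    /// // Wait a bit for a back-pagination token to arrive from sync, then paginate.
+    /// let token = room_event_cache
+    ///     .oldest_backpagination_token(Some(Duration::from_secs(10)))
+    ///     .await?;
+    ///
+    /// match room_event_cache.backpaginate_with_token(20, token).await? {
+    ///     BackPaginationOutcome::Success { events, reached_start } => {
+    ///         // `events` are in reverse topological order.
+    ///     }
+    ///     BackPaginationOutcome::UnknownBackpaginationToken => {
+    ///         // The timeline was reset in the meantime; fetch a fresh token.
+    ///     }
+    /// }
+    /// # anyhow::Ok(()) };
+    /// ```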
+ pub async fn event_cache( + &self, + ) -> event_cache::Result<(RoomEventCache, Arc)> { + let global_event_cache = self.client.event_cache(); + + global_event_cache.for_room(self.room_id()).await.map(|(maybe_room, drop_handles)| { + // SAFETY: the `RoomEventCache` must always been found, since we're constructing + // from a `Room`. + (maybe_room.unwrap(), drop_handles) + }) + } } /// Details of the (latest) invite. diff --git a/crates/matrix-sdk/src/room/power_levels.rs b/crates/matrix-sdk/src/room/power_levels.rs index 4e891f863af..4b272280bbd 100644 --- a/crates/matrix-sdk/src/room/power_levels.rs +++ b/crates/matrix-sdk/src/room/power_levels.rs @@ -199,11 +199,7 @@ pub fn power_level_user_changes( mod tests { use std::collections::BTreeMap; - use ruma::{ - events::room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - int, - power_levels::NotificationPowerLevels, - }; + use ruma::{int, power_levels::NotificationPowerLevels}; use super::*; diff --git a/crates/matrix-sdk/src/sliding_sync/mod.rs b/crates/matrix-sdk/src/sliding_sync/mod.rs index 9ffe486e3a1..438fff0c7a5 100644 --- a/crates/matrix-sdk/src/sliding_sync/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/mod.rs @@ -722,14 +722,11 @@ impl SlidingSync { pub fn sync(&self) -> impl Stream> + '_ { debug!("Starting sync stream"); - let sync_span = Span::current(); let mut internal_channel_receiver = self.inner.internal_channel.subscribe(); stream! { loop { - sync_span.in_scope(|| { - debug!("Sync stream is running"); - }); + debug!("Sync stream is running"); select! { biased; @@ -737,9 +734,7 @@ impl SlidingSync { internal_message = internal_channel_receiver.recv() => { use SlidingSyncInternalMessage::*; - sync_span.in_scope(|| { - debug!(?internal_message, "Sync stream has received an internal message"); - }); + debug!(?internal_message, "Sync stream has received an internal message"); match internal_message { Err(_) | Ok(SyncLoopStop) => { @@ -752,7 +747,7 @@ impl SlidingSync { } } - update_summary = self.sync_once().instrument(sync_span.clone()) => { + update_summary = self.sync_once() => { match update_summary { Ok(updates) => { yield Ok(updates); @@ -767,9 +762,7 @@ impl SlidingSync { Err(error) => { if error.client_api_error_kind() == Some(&ErrorKind::UnknownPos) { // The Sliding Sync session has expired. Let's reset `pos` and sticky parameters. - sync_span.in_scope(|| async { - self.expire_session().await; - }).await; + self.expire_session().await; } yield Err(error); diff --git a/crates/matrix-sdk/src/test_utils.rs b/crates/matrix-sdk/src/test_utils.rs index e9ec3f4651d..63b0f8d343c 100644 --- a/crates/matrix-sdk/src/test_utils.rs +++ b/crates/matrix-sdk/src/test_utils.rs @@ -1,9 +1,10 @@ -//! Testing utilities - DO NOT USE IN PRODUCTION. +//! Testing utilities - DO NOT USE IN PRODUCTION. #![allow(dead_code)] use matrix_sdk_base::SessionMeta; use ruma::{api::MatrixVersion, device_id, user_id}; +use url::Url; use crate::{ config::RequestConfig, @@ -11,12 +12,18 @@ use crate::{ Client, ClientBuilder, }; -pub(crate) fn test_client_builder(homeserver_url: Option) -> ClientBuilder { - let homeserver = homeserver_url.as_deref().unwrap_or("http://localhost:1234"); +/// A [`ClientBuilder`] fit for testing, using the given `homeserver_url` (or +/// localhost:1234). 
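+///
+/// # Examples
+///
+/// A short sketch, mirroring what [`no_retry_test_client`] below does with the
+/// returned builder:
+///
+/// ```ignore
+/// let client = test_client_builder(None)
+///     .request_config(RequestConfig::new().disable_retry())
+///     .build()
+///     .await
+///     .unwrap();
+/// ```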
+pub fn test_client_builder(homeserver_url: Option<String>) -> ClientBuilder { + let homeserver = homeserver_url + .map(|url| Url::try_from(url.as_str()).unwrap()) + .unwrap_or_else(|| Url::try_from("http://localhost:1234").unwrap()); Client::builder().homeserver_url(homeserver).server_versions([MatrixVersion::V1_0]) } -pub(crate) async fn no_retry_test_client(homeserver_url: Option<String>) -> Client { +/// A [`Client`] using the given `homeserver_url` (or localhost:1234), that will +/// never retry any failed requests. +pub async fn no_retry_test_client(homeserver_url: Option<String>) -> Client { test_client_builder(homeserver_url) .request_config(RequestConfig::new().disable_retry()) .build() @@ -24,16 +31,47 @@ pub(crate) async fn no_retry_test_client(homeserver_url: Option) -> Clie .unwrap() } -pub(crate) async fn logged_in_client(homeserver_url: Option<String>) -> Client { - let session = MatrixSession { - meta: SessionMeta { - user_id: user_id!("@example:localhost").to_owned(), - device_id: device_id!("DEVICEID").to_owned(), - }, - tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, - }; +/// A [`Client`] using the given `homeserver_url` (or localhost:1234), that will +/// never retry any failed requests, and already logged in with a hardcoded +/// Matrix authentication session (the user id and device id are hardcoded too). +pub async fn logged_in_client(homeserver_url: Option<String>) -> Client { let client = no_retry_test_client(homeserver_url).await; - client.matrix_auth().restore_session(session).await.unwrap(); client + .matrix_auth() + .restore_session(MatrixSession { + meta: SessionMeta { + user_id: user_id!("@example:localhost").to_owned(), + device_id: device_id!("DEVICEID").to_owned(), + }, + tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, + }) + .await + .unwrap(); + + client +} + +/// Like [`test_client_builder`], but with a mocked server too. +#[cfg(not(target_arch = "wasm32"))] +pub async fn test_client_builder_with_server() -> (ClientBuilder, wiremock::MockServer) { + let server = wiremock::MockServer::start().await; + let builder = test_client_builder(Some(server.uri().to_string())); + (builder, server) +} + +/// Like [`no_retry_test_client`], but with a mocked server too. +#[cfg(not(target_arch = "wasm32"))] +pub async fn no_retry_test_client_with_server() -> (Client, wiremock::MockServer) { + let server = wiremock::MockServer::start().await; + let client = no_retry_test_client(Some(server.uri().to_string())).await; + (client, server) +} + +/// Like [`logged_in_client`], but with a mocked server too.
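+///
+/// # Examples
+///
+/// A sketch of the pattern used by the integration tests: pair the logged-in
+/// client with its [`wiremock::MockServer`] and mount whatever endpoints the
+/// test needs (the matchers and the response body here are illustrative only).
+///
+/// ```ignore
+/// let (client, server) = logged_in_client_with_server().await;
+///
+/// Mock::given(method("GET"))
+///     .and(path("/_matrix/client/r0/account/whoami"))
+///     .respond_with(
+///         ResponseTemplate::new(200)
+///             .set_body_json(serde_json::json!({ "user_id": "@example:localhost" })),
+///     )
+///     .mount(&server)
+///     .await;
+///
+/// assert_eq!(client.whoami().await.unwrap().user_id, "@example:localhost");
+/// ```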
+#[cfg(not(target_arch = "wasm32"))] +pub async fn logged_in_client_with_server() -> (Client, wiremock::MockServer) { + let server = wiremock::MockServer::start().await; + let client = logged_in_client(Some(server.uri().to_string())).await; + (client, server) } diff --git a/crates/matrix-sdk/tests/integration/client.rs b/crates/matrix-sdk/tests/integration/client.rs index 3069bb05552..1210cc0bedc 100644 --- a/crates/matrix-sdk/tests/integration/client.rs +++ b/crates/matrix-sdk/tests/integration/client.rs @@ -6,6 +6,7 @@ use matrix_sdk::{ config::SyncSettings, media::{MediaFormat, MediaRequest, MediaThumbnailSize}, sync::RoomUpdate, + test_utils::no_retry_test_client_with_server, }; use matrix_sdk_base::{sync::RoomUpdates, RoomState}; use matrix_sdk_test::{ @@ -45,11 +46,11 @@ use wiremock::{ Mock, Request, ResponseTemplate, }; -use crate::{logged_in_client, mock_sync, no_retry_test_client}; +use crate::{logged_in_client_with_server, mock_sync}; #[async_test] async fn sync() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::SYNC, None).await; @@ -62,7 +63,7 @@ async fn sync() { #[async_test] async fn devices() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("GET")) .and(path("/_matrix/client/r0/devices")) @@ -75,7 +76,7 @@ async fn devices() { #[async_test] async fn delete_devices() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/delete_devices")) @@ -141,7 +142,7 @@ async fn delete_devices() { #[async_test] async fn resolve_room_alias() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("GET")) .and(path("/_matrix/client/r0/directory/room/%23alias:example.org")) @@ -155,7 +156,7 @@ async fn resolve_room_alias() { #[async_test] async fn join_leave_room() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::SYNC, None).await; @@ -178,7 +179,7 @@ async fn join_leave_room() { #[async_test] async fn join_room_by_id() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/join")) @@ -197,7 +198,7 @@ async fn join_room_by_id() { #[async_test] async fn join_room_by_id_or_alias() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/join/")) @@ -223,7 +224,7 @@ async fn join_room_by_id_or_alias() { #[async_test] async fn room_search_all() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("GET")) .and(path("/_matrix/client/r0/publicRooms")) @@ -238,7 +239,7 @@ async fn room_search_all() { #[async_test] async fn room_search_filtered() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/publicRooms")) @@ -258,7 +259,7 @@ async fn room_search_filtered() { #[async_test] async fn invited_rooms() { - let (client, server) = 
logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::INVITE_SYNC, None).await; @@ -274,7 +275,7 @@ async fn invited_rooms() { #[async_test] async fn left_rooms() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::LEAVE_SYNC, None).await; @@ -290,7 +291,7 @@ async fn left_rooms() { #[async_test] async fn get_media_content() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let media = client.media(); @@ -358,7 +359,7 @@ async fn get_media_content() { #[async_test] async fn get_media_file() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let event_content = ImageMessageEventContent::plain( "filename.jpg".into(), @@ -403,7 +404,7 @@ async fn get_media_file() { #[async_test] async fn whoami() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("GET")) .and(path("/_matrix/client/r0/account/whoami")) @@ -419,7 +420,7 @@ async fn whoami() { #[async_test] async fn test_room_update_channel() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let mut rx = client.subscribe_to_room_updates(&DEFAULT_TEST_ROOM_ID); @@ -444,7 +445,7 @@ async fn test_room_update_channel() { #[async_test] async fn test_subscribe_all_room_updates() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let mut rx = client.subscribe_to_all_room_updates(); @@ -504,7 +505,7 @@ async fn test_subscribe_all_room_updates() { #[cfg(all(feature = "e2e-encryption", not(target_arch = "wasm32")))] #[async_test] async fn request_encryption_event_before_sending() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::SYNC, None).await; client @@ -555,7 +556,7 @@ async fn request_encryption_event_before_sending() { // a DM. 
#[async_test] async fn marking_room_as_dm() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::SYNC, None).await; client @@ -626,7 +627,7 @@ async fn marking_room_as_dm() { #[cfg(feature = "e2e-encryption")] #[async_test] async fn get_own_device() { - let (client, _) = logged_in_client().await; + let (client, _) = logged_in_client_with_server().await; let device = client .encryption() @@ -649,7 +650,7 @@ async fn get_own_device() { #[cfg(feature = "e2e-encryption")] #[async_test] async fn cross_signing_status() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/unstable/keys/device_signing/upload")) @@ -710,7 +711,7 @@ async fn test_encrypt_room_event() { use ruma::events::room::encrypted::RoomEncryptedEventContent; - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let user_id = client.user_id().unwrap(); Mock::given(method("POST")) @@ -844,7 +845,7 @@ async fn test_encrypt_room_event() { #[cfg(not(feature = "e2e-encryption"))] #[async_test] async fn create_dm_non_encrypted() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let user_id = user_id!("@invitee:localhost"); Mock::given(method("POST")) @@ -893,7 +894,7 @@ async fn create_dm_non_encrypted() { #[cfg(feature = "e2e-encryption")] #[async_test] async fn create_dm_encrypted() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let user_id = user_id!("@invitee:localhost"); Mock::given(method("POST")) @@ -955,7 +956,7 @@ async fn create_dm_encrypted() { #[async_test] async fn create_dm_error() { - let (client, _server) = logged_in_client().await; + let (client, _server) = logged_in_client_with_server().await; let user_id = user_id!("@invitee:localhost"); // The endpoint is not mocked so we encounter a 404. 
@@ -967,7 +968,7 @@ async fn create_dm_error() { #[async_test] async fn test_ambiguity_changes() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let example_id = user_id!("@example:localhost"); let example_2_id = user_id!("@example2:localhost"); diff --git a/crates/matrix-sdk/tests/integration/encryption/backups.rs b/crates/matrix-sdk/tests/integration/encryption/backups.rs index 44babcde647..7fa5e9ff6de 100644 --- a/crates/matrix-sdk/tests/integration/encryption/backups.rs +++ b/crates/matrix-sdk/tests/integration/encryption/backups.rs @@ -24,6 +24,7 @@ use matrix_sdk::{ BackupDownloadStrategy, EncryptionSettings, }, matrix_auth::{MatrixSession, MatrixSessionTokens}, + test_utils::{no_retry_test_client_with_server, test_client_builder_with_server}, Client, }; use matrix_sdk_base::SessionMeta; @@ -42,10 +43,7 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{ - encryption::mock_secret_store_with_backup_key, mock_sync, no_retry_test_client, - test_client_builder, -}; +use crate::{encryption::mock_secret_store_with_backup_key, mock_sync}; const ROOM_KEY: &[u8] = b"\ -----BEGIN MEGOLM SESSION DATA-----\n\ @@ -85,7 +83,7 @@ async fn create() { tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; assert!( !client.encryption().backups().are_enabled().await, @@ -160,7 +158,7 @@ async fn creation_failure() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; client.restore_session(session).await.unwrap(); mount_once( @@ -241,7 +239,7 @@ async fn disabling() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; client.restore_session(session).await.unwrap(); mount_once( @@ -329,7 +327,7 @@ async fn backup_resumption() { let user_id = user_id!("@example:morpheus.localhost"); - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder .request_config(RequestConfig::new().disable_retry()) .sqlite_store(dir.path(), None) @@ -423,7 +421,7 @@ async fn steady_state_waiting() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; client.restore_session(session).await.unwrap(); setup_backups(&client, &server).await; @@ -607,7 +605,7 @@ async fn incremental_upload_of_keys() -> Result<()> { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; client.restore_session(session).await.unwrap(); let backups = 
client.encryption().backups(); @@ -788,7 +786,7 @@ async fn steady_state_waiting_errors() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; client.restore_session(session).await.unwrap(); let result = client.encryption().backups().wait_for_steady_state().await; @@ -875,7 +873,7 @@ async fn enable_from_secret_storage() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let encryption_settings = EncryptionSettings { backup_download_strategy: BackupDownloadStrategy::OneShot, ..Default::default() @@ -1042,7 +1040,7 @@ async fn enable_from_secret_storage_no_existing_backup() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let encryption_settings = EncryptionSettings { backup_download_strategy: BackupDownloadStrategy::OneShot, ..Default::default() @@ -1095,7 +1093,7 @@ async fn enable_from_secret_storage_mismatched_key() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let encryption_settings = EncryptionSettings { backup_download_strategy: BackupDownloadStrategy::OneShot, ..Default::default() @@ -1156,7 +1154,7 @@ async fn enable_from_secret_storage_manual_download() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder.request_config(RequestConfig::new().disable_retry()).build().await.unwrap(); @@ -1198,7 +1196,7 @@ async fn enable_from_secret_storage_and_manual_download() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let encryption_settings = EncryptionSettings { backup_download_strategy: BackupDownloadStrategy::Manual, ..Default::default() @@ -1349,7 +1347,7 @@ async fn enable_from_secret_storage_and_download_after_utd() { meta: SessionMeta { user_id: user_id.into(), device_id: device_id!("DEVICEID").to_owned() }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let encryption_settings = EncryptionSettings { backup_download_strategy: BackupDownloadStrategy::AfterDecryptionFailure, ..Default::default() 
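Taken together, the hunks above replace the crate-local test helpers with the `*_with_server` variants exported from `matrix_sdk::test_utils`. As a rough, illustrative sketch of the shared pattern (the helper yields a retry-free `Client` plus the `wiremock::MockServer` it talks to; the test restores a fake session and mounts only the endpoints it needs), something like the following is assumed; the session values and the `/whoami` mock mirror the surrounding tests rather than reproducing any specific one:

use matrix_sdk::{
    matrix_auth::{MatrixSession, MatrixSessionTokens},
    test_utils::no_retry_test_client_with_server,
};
use matrix_sdk_base::SessionMeta;
use matrix_sdk_test::async_test;
use ruma::{device_id, user_id};
use serde_json::json;
use wiremock::{
    matchers::{header, method, path},
    Mock, ResponseTemplate,
};

#[async_test]
async fn whoami_against_a_mock_server() {
    // The helper hands back a client with retries disabled plus the wiremock
    // server it points at.
    let (client, server) = no_retry_test_client_with_server().await;

    // Restore a fake session so the client considers itself logged in, the
    // same way the backup and recovery tests above do.
    client
        .restore_session(MatrixSession {
            meta: SessionMeta {
                user_id: user_id!("@example:localhost").to_owned(),
                device_id: device_id!("DEVICEID").to_owned(),
            },
            tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None },
        })
        .await
        .unwrap();

    // Mount only the endpoint this particular test exercises.
    Mock::given(method("GET"))
        .and(path("/_matrix/client/r0/account/whoami"))
        .and(header("authorization", "Bearer 1234"))
        .respond_with(ResponseTemplate::new(200).set_body_json(json!({
            "user_id": "@example:localhost",
            "device_id": "DEVICEID",
            "is_guest": false,
        })))
        .mount(&server)
        .await;

    let response = client.whoami().await.unwrap();
    assert_eq!(response.user_id, user_id!("@example:localhost").to_owned());
}

The `logged_in_client_with_server` variant, whose definition appears at the top of this diff, bundles the session-restoring step, which is why most hunks above only needed to swap a helper name.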
diff --git a/crates/matrix-sdk/tests/integration/encryption/recovery.rs b/crates/matrix-sdk/tests/integration/encryption/recovery.rs index e54ade6ba68..9be27e6bf34 100644 --- a/crates/matrix-sdk/tests/integration/encryption/recovery.rs +++ b/crates/matrix-sdk/tests/integration/encryption/recovery.rs @@ -23,6 +23,7 @@ use matrix_sdk::{ BackupDownloadStrategy, }, matrix_auth::{MatrixSession, MatrixSessionTokens}, + test_utils::{no_retry_test_client_with_server, test_client_builder_with_server}, Client, }; use matrix_sdk_base::SessionMeta; @@ -36,10 +37,7 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{ - encryption::mock_secret_store_with_backup_key, logged_in_client, no_retry_test_client, - test_client_builder, -}; +use crate::{encryption::mock_secret_store_with_backup_key, logged_in_client_with_server}; async fn test_client(user_id: &UserId) -> (Client, wiremock::MockServer) { let session = MatrixSession { @@ -47,7 +45,7 @@ async fn test_client(user_id: &UserId) -> (Client, wiremock::MockServer) { tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder .request_config(RequestConfig::new().disable_retry()) .with_encryption_settings(matrix_sdk::encryption::EncryptionSettings { @@ -157,7 +155,7 @@ async fn mock_put_new_default_secret_storage_key(user_id: &UserId, server: &wire #[async_test] async fn recovery_status_server_unavailable() { - let (client, _) = logged_in_client().await; + let (client, _) = logged_in_client_with_server().await; client.encryption().wait_for_e2ee_initialization_tasks().await; assert_eq!(client.encryption().recovery().state(), RecoveryState::Unknown); } @@ -172,7 +170,7 @@ async fn recovery_status_secret_storage_set_up() { tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; mock_secret_store_with_backup_key(user_id, KEY_ID, &server).await; @@ -193,7 +191,7 @@ async fn recovery_status_secret_storage_not_set_up() { tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("GET")) .and(path(format!( @@ -707,7 +705,7 @@ async fn recover_and_reset() { tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; mock_secret_store_with_backup_key(user_id, KEY_ID, &server).await; diff --git a/crates/matrix-sdk/tests/integration/encryption/secret_storage.rs b/crates/matrix-sdk/tests/integration/encryption/secret_storage.rs index 3bcb2944aae..d1e62bd67d6 100644 --- a/crates/matrix-sdk/tests/integration/encryption/secret_storage.rs +++ b/crates/matrix-sdk/tests/integration/encryption/secret_storage.rs @@ -4,6 +4,7 @@ use assert_matches::assert_matches; use matrix_sdk::{ encryption::secret_storage::SecretStorageError, matrix_auth::{MatrixSession, MatrixSessionTokens}, + test_utils::no_retry_test_client_with_server, }; use matrix_sdk_base::SessionMeta; use matrix_sdk_test::async_test; @@ -23,7 +24,7 @@ use wiremock::{ Mock, MockServer, ResponseTemplate, }; -use crate::{logged_in_client, no_retry_test_client}; +use 
crate::logged_in_client_with_server; const SECRET_STORE_KEY: &str = "EsTj 3yST y93F SLpB jJsz eAXc 2XzA ygD3 w69H fGaN TKBj jXEd"; @@ -65,7 +66,7 @@ async fn mock_secret_store_key( #[async_test] async fn secret_store_create_default_key() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let user_id = client.user_id().expect("We should know our user ID by now"); @@ -140,7 +141,7 @@ async fn secret_store_create_default_key() { #[async_test] async fn secret_store_missing_key_info() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let user_id = client.user_id().expect("We should know our user ID by now"); let key_id = "bmur2d9ypPUH1msSwCxQOJkuKRmJI55e"; @@ -190,7 +191,7 @@ async fn secret_store_missing_key_info() { #[async_test] async fn secret_store_not_setup() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let user_id = client.user_id().expect("We should know our user ID by now"); @@ -221,7 +222,7 @@ async fn secret_store_not_setup() { #[async_test] async fn secret_store_opening() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_secret_store_key( &server, @@ -269,7 +270,7 @@ async fn secret_store_opening() { #[async_test] async fn set_in_secret_store() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_secret_store_key( &server, @@ -375,7 +376,7 @@ async fn restore_cross_signing_from_secret_store() { }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; client.restore_session(session).await.unwrap(); mock_secret_store_key( @@ -576,7 +577,7 @@ async fn is_secret_storage_enabled() { }, tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, }; - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; client.restore_session(session).await.unwrap(); { diff --git a/crates/matrix-sdk/tests/integration/encryption/verification.rs b/crates/matrix-sdk/tests/integration/encryption/verification.rs index dac67c6e5d9..033e3dc7411 100644 --- a/crates/matrix-sdk/tests/integration/encryption/verification.rs +++ b/crates/matrix-sdk/tests/integration/encryption/verification.rs @@ -3,20 +3,18 @@ use std::{ sync::{Arc, Mutex}, }; +use futures_util::FutureExt; use imbl::HashSet; use matrix_sdk::{ config::RequestConfig, + encryption::VerificationState, matrix_auth::{MatrixSession, MatrixSessionTokens}, Client, }; -use matrix_sdk_base::{crypto::EncryptionSyncChanges, SessionMeta}; +use matrix_sdk_base::SessionMeta; use matrix_sdk_test::{async_test, SyncResponseBuilder}; use ruma::{ - api::{ - client::{keys::upload_signatures::v3::SignedKeys, sync::sync_events::DeviceLists}, - MatrixVersion, - }, - assign, + api::{client::keys::upload_signatures::v3::SignedKeys, MatrixVersion}, encryption::{CrossSigningKey, DeviceKeys}, owned_device_id, owned_user_id, serde::Raw, @@ -28,7 +26,7 @@ use wiremock::{ Mock, MockServer, Request, ResponseTemplate, }; -use crate::mock_sync; +use crate::mock_sync_scoped; #[derive(Debug, Default)] struct Keys { @@ -336,16 +334,28 @@ async fn test_own_verification() { .await .unwrap(); + // Subscribe to 
verification state updates + let mut verification_state_subscriber = alice.encryption().verification_state(); + assert_eq!(alice.encryption().verification_state().get(), VerificationState::Unknown); + server.add_known_device(&device_id); // Have Alice bootstrap cross-signing. bootstrap_cross_signing(&alice).await; - // The local device is considered verified by default. + // The local device is considered verified by default, we need a keys query to + // run let own_device = alice.encryption().get_device(&user_id, &device_id).await.unwrap().unwrap(); assert!(own_device.is_verified()); assert!(!own_device.is_deleted()); + // The device is not considered cross signed yet + assert_eq!( + verification_state_subscriber.next().now_or_never().flatten().unwrap(), + VerificationState::Unverified + ); + assert_eq!(alice.encryption().verification_state().get(), VerificationState::Unverified); + // Manually re-verifying doesn't change the outcome. own_device.verify().await.unwrap(); assert!(own_device.is_verified()); @@ -364,6 +374,124 @@ async fn test_own_verification() { // Manually re-verifying doesn't change the outcome. user_identity.verify().await.unwrap(); assert!(user_identity.is_verified()); + + // Force a keys query to pick up the cross-signing state + let mut sync_response_builder = SyncResponseBuilder::new(); + sync_response_builder.add_change_device(&user_id); + + { + let _scope = mock_sync_scoped( + &server.server, + sync_response_builder.build_json_sync_response(), + None, + ) + .await; + alice.sync_once(Default::default()).await.unwrap(); + } + + // The device should now be cross-signed + assert_eq!( + verification_state_subscriber.next().now_or_never().unwrap().unwrap(), + VerificationState::Verified + ); + assert_eq!(alice.encryption().verification_state().get(), VerificationState::Verified); +} + +#[async_test] +async fn test_reset_cross_signing_resets_verification() { + let mut server = MockedServer::new().await; + + let user_id = owned_user_id!("@alice:example.org"); + let device_id = owned_device_id!("4L1C3"); + let alice = Client::builder() + .homeserver_url(server.server.uri()) + .server_versions([MatrixVersion::V1_0]) + .request_config(RequestConfig::new().disable_retry()) + .build() + .await + .unwrap(); + alice + .restore_session(MatrixSession { + meta: SessionMeta { user_id: user_id.clone(), device_id: device_id.clone() }, + tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, + }) + .await + .unwrap(); + + // Subscribe to verification state updates + let mut verification_state_subscriber = alice.encryption().verification_state(); + assert_eq!(alice.encryption().verification_state().get(), VerificationState::Unknown); + + server.add_known_device(&device_id); + + // Have Alice bootstrap cross-signing. 
+ bootstrap_cross_signing(&alice).await; + + // The device is not considered cross signed yet + assert_eq!( + verification_state_subscriber.next().await.unwrap_or(VerificationState::Unknown), + VerificationState::Unverified + ); + assert_eq!(alice.encryption().verification_state().get(), VerificationState::Unverified); + + // Force a keys query to pick up the cross-signing state + let mut sync_response_builder = SyncResponseBuilder::new(); + sync_response_builder.add_change_device(&user_id); + + { + let _scope = mock_sync_scoped( + &server.server, + sync_response_builder.build_json_sync_response(), + None, + ) + .await; + alice.sync_once(Default::default()).await.unwrap(); + } + + // The device should now be cross-signed + assert_eq!( + verification_state_subscriber.next().now_or_never().unwrap().unwrap(), + VerificationState::Verified + ); + assert_eq!(alice.encryption().verification_state().get(), VerificationState::Verified); + + let device_id = owned_device_id!("AliceDevice2"); + let alice2 = Client::builder() + .homeserver_url(server.server.uri()) + .server_versions([MatrixVersion::V1_0]) + .request_config(RequestConfig::new().disable_retry()) + .build() + .await + .unwrap(); + alice2 + .restore_session(MatrixSession { + meta: SessionMeta { user_id: user_id.clone(), device_id: device_id.clone() }, + tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, + }) + .await + .unwrap(); + + server.add_known_device(&device_id); + + // Have Alice bootstrap cross-signing again, this time on her second device. + bootstrap_cross_signing(&alice2).await; + + { + let _scope = mock_sync_scoped( + &server.server, + sync_response_builder.build_json_sync_response(), + None, + ) + .await; + alice.sync_once(Default::default()).await.unwrap(); + } + + // The device shouldn't be cross-signed anymore. + assert_eq!(alice.encryption().verification_state().get(), VerificationState::Unverified); + assert_eq!( + verification_state_subscriber.next().now_or_never().unwrap().unwrap(), + VerificationState::Unverified + ); } #[async_test] @@ -429,8 +557,13 @@ async fn test_unchecked_mutual_verification() { // Have Alice and Bob upload their signed device keys. { let mut sync_response_builder = SyncResponseBuilder::new(); - mock_sync(&server.server, sync_response_builder.build_json_sync_response(), None).await; - alice.sync_once(Default::default()).await.unwrap(); + let response_body = sync_response_builder.build_json_sync_response(); + let _scope = mock_sync_scoped(&server.server, response_body, None).await; + + alice + .sync_once(Default::default()) + .await + .expect("We should be able to sync with Alice so we upload the device keys"); bob.sync_once(Default::default()).await.unwrap(); } @@ -444,8 +577,19 @@ async fn test_unchecked_mutual_verification() { // Run a sync so we do send outgoing requests, including the /keys/query for // getting bob's identity. let mut sync_response_builder = SyncResponseBuilder::new(); - mock_sync(&server.server, sync_response_builder.build_json_sync_response(), None).await; - alice.sync_once(Default::default()).await.unwrap(); + + { + let _scope = mock_sync_scoped( + &server.server, + sync_response_builder.build_json_sync_response(), + None, + ) + .await; + alice + .sync_once(Default::default()) + .await + .expect("We should be able to sync so we get the initial set of devices"); + } + + // From the point of view of Alice, Bob now has a device. 
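The assertions above walk the `VerificationState` of the local device from `Unknown` (no `/keys/query` processed yet) through `Unverified` to `Verified`, and back to `Unverified` once another device rotates the cross-signing keys. A hypothetical consumer-side helper, using only the `get()`/`next()` subscriber calls exercised in these hunks and assuming the `e2e-encryption` feature, might look like this:

use matrix_sdk::{encryption::VerificationState, Client};

// Hypothetical helper: wait until the current device is cross-signed. The
// subscriber returned by `verification_state()` yields a new value whenever a
// sync-driven `/keys/query` changes the state.
async fn wait_until_verified(client: &Client) {
    let mut subscriber = client.encryption().verification_state();
    while subscriber.get() != VerificationState::Verified {
        // `next()` resolves on the next state change; `None` means the
        // updates stream was closed, so stop waiting.
        if subscriber.next().await.is_none() {
            break;
        }
    }
}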
let alice_bob_device = alice @@ -468,29 +612,21 @@ async fn test_unchecked_mutual_verification() { alice_bob_ident.verify().await.unwrap(); - // Notify Alice's devices that some identify changed, so it does another - // /keys/query request. { - let alice_olm = alice.olm_machine_for_testing().await; - let alice_olm = alice_olm.as_ref().unwrap(); - let changed_devices = &assign!(DeviceLists::default(), { - changed: vec![bob_user_id.clone()] - }); - alice_olm - .receive_sync_changes(EncryptionSyncChanges { - to_device_events: Default::default(), - changed_devices, - one_time_keys_counts: &Default::default(), - unused_fallback_keys: Default::default(), - next_batch_token: None, - }) + // Notify Alice's devices that some identify changed, so it does another + // /keys/query request. + let _scope = mock_sync_scoped( + &server.server, + sync_response_builder.add_change_device(&bob_user_id).build_json_sync_response(), + None, + ) + .await; + alice + .sync_once(Default::default()) .await - .unwrap(); + .expect("We should be able to sync to get notified about the changed device"); } - mock_sync(&server.server, sync_response_builder.build_json_sync_response(), None).await; - alice.sync_once(Default::default()).await.unwrap(); - let alice_bob_ident = alice .encryption() .get_user_identity(&bob_user_id) diff --git a/crates/matrix-sdk/tests/integration/event_cache.rs b/crates/matrix-sdk/tests/integration/event_cache.rs new file mode 100644 index 00000000000..7feba7c4b88 --- /dev/null +++ b/crates/matrix-sdk/tests/integration/event_cache.rs @@ -0,0 +1,589 @@ +use std::time::Duration; + +use assert_matches2::{assert_let, assert_matches}; +use matrix_sdk::{ + event_cache::{BackPaginationOutcome, EventCacheError, RoomEventCacheUpdate}, + test_utils::logged_in_client_with_server, +}; +use matrix_sdk_common::deserialized_responses::SyncTimelineEvent; +use matrix_sdk_test::{ + async_test, sync_timeline_event, EventBuilder, JoinedRoomBuilder, SyncResponseBuilder, +}; +use ruma::{ + event_id, + events::{ + room::message::{MessageType, RoomMessageEventContent}, + AnySyncMessageLikeEvent, AnySyncTimelineEvent, AnyTimelineEvent, + }, + room_id, + serde::Raw, + user_id, +}; +use serde_json::json; +use tokio::{spawn, time::timeout}; +use wiremock::{ + matchers::{header, method, path_regex, query_param}, + Mock, MockServer, ResponseTemplate, +}; + +use crate::mock_sync; + +#[track_caller] +fn assert_event_matches_msg(event: &SyncTimelineEvent, expected: &str) { + let event = event.event.deserialize().unwrap(); + assert_let!( + AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::RoomMessage(message)) = event + ); + let message = message.as_original().unwrap(); + assert_let!(MessageType::Text(text) = &message.content.msgtype); + assert_eq!(text.body, expected); +} + +#[async_test] +async fn test_must_explicitly_subscribe() { + let (client, server) = logged_in_client_with_server().await; + + let room_id = room_id!("!omelette:fromage.fr"); + + // Make sure the client is aware of the room. 
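The event-cache tests that follow keep using the plain `mock_sync` helper plus `server.reset()`, while the verification hunks above switch to `mock_sync_scoped` (added to `tests/integration/main.rs` further down in this diff), whose `wiremock::MockGuard` unmounts the `/sync` mock as soon as it is dropped. A small illustrative sketch of that guard pattern, with `sync_once_with_fresh_mock` being a purely hypothetical name:

use matrix_sdk_test::SyncResponseBuilder;

// Illustrative only: this sketch is assumed to live in the same test crate,
// so it can call the `mock_sync_scoped` helper defined in
// tests/integration/main.rs.
async fn sync_once_with_fresh_mock(
    client: &matrix_sdk::Client,
    server: &wiremock::MockServer,
) -> matrix_sdk::Result<()> {
    let response_body = SyncResponseBuilder::new().build_json_sync_response();
    {
        // Mounted for this block only; the guard unmounts the mock on drop.
        let _scope = crate::mock_sync_scoped(server, response_body, None).await;
        let _response = client.sync_once(Default::default()).await?;
    }
    // The `/sync` mock is gone again here, without calling `server.reset()`.
    Ok(())
}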
+ { + let mut sync_builder = SyncResponseBuilder::new(); + sync_builder.add_joined_room(JoinedRoomBuilder::new(room_id)); + let response_body = sync_builder.build_json_sync_response(); + + mock_sync(&server, response_body, None).await; + client.sync_once(Default::default()).await.unwrap(); + server.reset().await; + } + + // If I create a room event subscriber for a room before subscribing the event + // cache, + let room = client.get_room(room_id).unwrap(); + let result = room.event_cache().await; + + // Then it fails, because one must explicitly call `.subscribe()` on the event + // cache. + assert_matches!(result, Err(EventCacheError::NotSubscribedYet)); +} + +#[async_test] +async fn test_add_initial_events() { + let (client, server) = logged_in_client_with_server().await; + + // Immediately subscribe the event cache to sync updates. + client.event_cache().subscribe().unwrap(); + + // If I sync and get informed I've joined The Room, but with no events, + let room_id = room_id!("!omelette:fromage.fr"); + + let mut sync_builder = SyncResponseBuilder::new(); + sync_builder.add_joined_room(JoinedRoomBuilder::new(room_id)); + let response_body = sync_builder.build_json_sync_response(); + + mock_sync(&server, response_body, None).await; + client.sync_once(Default::default()).await.unwrap(); + server.reset().await; + + // If I create a room event subscriber, + + let room = client.get_room(room_id).unwrap(); + let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); + let (events, mut subscriber) = room_event_cache.subscribe().await.unwrap(); + + // Then at first it's empty, and the subscriber doesn't yield anything. + assert!(events.is_empty()); + assert!(subscriber.is_empty()); + + // And after a sync, yielding updates to two rooms, + sync_builder.add_joined_room(JoinedRoomBuilder::new(room_id).add_timeline_event( + EventBuilder::new().make_sync_message_event( + user_id!("@dexter:lab.org"), + RoomMessageEventContent::text_plain("bonjour monde"), + ), + )); + + sync_builder.add_joined_room( + JoinedRoomBuilder::new(room_id!("!parallel:universe.uk")).add_timeline_event( + EventBuilder::new().make_sync_message_event( + user_id!("@dexter:lab.org"), + RoomMessageEventContent::text_plain("hi i'm learning French"), + ), + ), + ); + + let response_body = sync_builder.build_json_sync_response(); + + mock_sync(&server, response_body, None).await; + client.sync_once(Default::default()).await.unwrap(); + server.reset().await; + + // It does receive one update, + let update = timeout(Duration::from_secs(2), subscriber.recv()) + .await + .expect("timeout after receiving a sync update") + .expect("should've received a room event cache update"); + + // Which contains the event that was sent beforehand. + assert_let!(RoomEventCacheUpdate::Append { events, .. } = update); + assert_eq!(events.len(), 1); + assert_event_matches_msg(&events[0], "bonjour monde"); + + // And when I later add initial events to this room, + + // XXX: when we get rid of `add_initial_events`, we can keep this test as a + // smoke test for the event cache. 
+ client + .event_cache() + .add_initial_events( + room_id, + vec![SyncTimelineEvent::new(sync_timeline_event!({ + "sender": "@dexter:lab.org", + "type": "m.room.message", + "event_id": "$ida", + "origin_server_ts": 12344446, + "content": { "body":"new choice!", "msgtype": "m.text" }, + }))], + None, + ) + .await + .unwrap(); + + // Then I receive an update that the room has been cleared, + let update = timeout(Duration::from_secs(2), subscriber.recv()) + .await + .expect("timeout after receiving a sync update") + .expect("should've received a room event cache update"); + assert_let!(RoomEventCacheUpdate::Clear = update); + + // Before receiving the "initial" event. + let update = timeout(Duration::from_secs(2), subscriber.recv()) + .await + .expect("timeout after receiving a sync update") + .expect("should've received a room event cache update"); + assert_let!(RoomEventCacheUpdate::Append { events, .. } = update); + assert_eq!(events.len(), 1); + assert_event_matches_msg(&events[0], "new choice!"); + + // That's all, folks! + assert!(subscriber.is_empty()); +} + +macro_rules! non_sync_events { + ( @_ $builder:expr, [ ( $room_id:expr , $event_id:literal : $msg:literal ) $(, $( $rest:tt )* )? ] [ $( $accumulator:tt )* ] ) => { + non_sync_events!( + @_ $builder, + [ $( $( $rest )* )? ] + [ $( $accumulator )* + $builder.make_message_event_with_id( + user_id!("@a:b.c"), + $room_id, + event_id!($event_id), + RoomMessageEventContent::text_plain($msg) + ), + ] + ) + }; + + ( @_ $builder:expr, [] [ $( $accumulator:tt )* ] ) => { + vec![ $( $accumulator )* ] + }; + + ( $builder:expr, [ $( $all:tt )* ] ) => { + non_sync_events!( @_ $builder, [ $( $all )* ] [] ) + }; +} + +/// Mounts a mock for the /messages endpoint of a pagination request, matching +/// against a precise `from` token given as `expected_from`, and returning the +/// chunk of events and the next token as `end` (if available). +async fn mock_messages( + server: &MockServer, + expected_from: &str, + next_token: Option<&str>, + chunk: Vec<Raw<AnyTimelineEvent>>, +) { + let response_json = json!({ + "chunk": chunk, + "start": "t392-516_47314_0_7_1_1_1_11444_1", + "end": next_token, + }); + Mock::given(method("GET")) + .and(path_regex(r"^/_matrix/client/r0/rooms/.*/messages$")) + .and(header("authorization", "Bearer 1234")) + .and(query_param("from", expected_from)) + .respond_with(ResponseTemplate::new(200).set_body_json(response_json)) + .expect(1) + .mount(server) + .await; +} + +#[async_test] +async fn test_backpaginate_once() { + let (client, server) = logged_in_client_with_server().await; + + let event_cache = client.event_cache(); + + // Immediately subscribe the event cache to sync updates. + event_cache.subscribe().unwrap(); + + // If I sync and get informed I've joined The Room, and get a previous batch + // token, + let room_id = room_id!("!omelette:fromage.fr"); + + let event_builder = EventBuilder::new(); + let mut sync_builder = SyncResponseBuilder::new(); + + { + sync_builder.add_joined_room( + JoinedRoomBuilder::new(room_id) + // Note to self: a timeline must have at least single event to be properly + // serialized. 
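For readability, this is what a `non_sync_events!` invocation such as the one in `test_backpaginate_once` is assumed to expand to, following the accumulator rules defined just above (each `(room, "$id": "msg")` triple becomes one `make_message_event_with_id` call inside a `vec![]`; the names come from the enclosing test):

// non_sync_events!(event_builder, [ (room_id, "$2": "world"), (room_id, "$3": "hello") ])
// expands to roughly:
vec![
    event_builder.make_message_event_with_id(
        user_id!("@a:b.c"),
        room_id,
        event_id!("$2"),
        RoomMessageEventContent::text_plain("world")
    ),
    event_builder.make_message_event_with_id(
        user_id!("@a:b.c"),
        room_id,
        event_id!("$3"),
        RoomMessageEventContent::text_plain("hello")
    ),
]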
+ .add_timeline_event(event_builder.make_sync_message_event( + user_id!("@a:b.c"), + RoomMessageEventContent::text_plain("heyo"), + )) + .set_timeline_prev_batch("prev_batch".to_owned()), + ); + let response_body = sync_builder.build_json_sync_response(); + + mock_sync(&server, response_body, None).await; + client.sync_once(Default::default()).await.unwrap(); + server.reset().await; + } + + let (room_event_cache, _drop_handles) = + client.get_room(room_id).unwrap().event_cache().await.unwrap(); + + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); + + // This is racy: either the initial message has been processed by the event + // cache (and no room updates will happen in this case), or it hasn't, and + // the stream will return the next message soon. + if events.is_empty() { + let _ = room_stream.recv().await.expect("read error"); + } else { + assert_eq!(events.len(), 1); + } + + let outcome = { + // Note: events must be presented in reversed order, since this is + // back-pagination. + mock_messages( + &server, + "prev_batch", + None, + non_sync_events!(event_builder, [ (room_id, "$2": "world"), (room_id, "$3": "hello") ]), + ) + .await; + + // Then if I backpaginate, + let token = room_event_cache + .oldest_backpagination_token(Some(Duration::from_secs(1))) + .await + .unwrap(); + assert!(token.is_some()); + + room_event_cache.backpaginate_with_token(20, token).await.unwrap() + }; + + // I'll get all the previous events, in "reverse" order (same as the response). + assert_let!(BackPaginationOutcome::Success { events, reached_start } = outcome); + assert!(reached_start); + + assert_event_matches_msg(&events[0].clone().into(), "world"); + assert_event_matches_msg(&events[1].clone().into(), "hello"); + assert_eq!(events.len(), 2); + + assert!(room_stream.is_empty()); +} + +#[async_test] +async fn test_backpaginate_multiple_iterations() { + let (client, server) = logged_in_client_with_server().await; + + let event_cache = client.event_cache(); + + // Immediately subscribe the event cache to sync updates. + event_cache.subscribe().unwrap(); + + // If I sync and get informed I've joined The Room, and get a previous batch + // token, + let room_id = room_id!("!omelette:fromage.fr"); + + let event_builder = EventBuilder::new(); + let mut sync_builder = SyncResponseBuilder::new(); + + { + sync_builder.add_joined_room( + JoinedRoomBuilder::new(room_id) + // Note to self: a timeline must have at least single event to be properly + // serialized. + .add_timeline_event(event_builder.make_sync_message_event( + user_id!("@a:b.c"), + RoomMessageEventContent::text_plain("heyo"), + )) + .set_timeline_prev_batch("prev_batch".to_owned()), + ); + let response_body = sync_builder.build_json_sync_response(); + + mock_sync(&server, response_body, None).await; + client.sync_once(Default::default()).await.unwrap(); + server.reset().await; + } + + let (room_event_cache, _drop_handles) = + client.get_room(room_id).unwrap().event_cache().await.unwrap(); + + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); + + // This is racy: either the initial message has been processed by the event + // cache (and no room updates will happen in this case), or it hasn't, and + // the stream will return the next message soon. 
+ if events.is_empty() { + let _ = room_stream.recv().await.expect("read error"); + } else { + assert_eq!(events.len(), 1); + } + + let mut num_iterations = 0; + let mut global_events = Vec::new(); + let mut global_reached_start = false; + + // The first back-pagination will return these two. + mock_messages( + &server, + "prev_batch", + Some("prev_batch2"), + non_sync_events!(event_builder, [ (room_id, "$2": "world"), (room_id, "$3": "hello") ]), + ) + .await; + + // The second round of back-pagination will return this one. + mock_messages( + &server, + "prev_batch2", + None, + non_sync_events!(event_builder, [ (room_id, "$4": "oh well"), ]), + ) + .await; + + // Then if I backpaginate in a loop, + while let Some(token) = + room_event_cache.oldest_backpagination_token(Some(Duration::from_secs(1))).await.unwrap() + { + match room_event_cache.backpaginate_with_token(20, Some(token)).await.unwrap() { + BackPaginationOutcome::Success { reached_start, events } => { + if !global_reached_start { + global_reached_start = reached_start; + } + global_events.extend(events); + } + BackPaginationOutcome::UnknownBackpaginationToken => { + panic!("shouldn't run into unknown backpagination error") + } + } + + num_iterations += 1; + } + + // I'll get all the previous events, + assert_eq!(num_iterations, 2); + assert!(global_reached_start); + + assert_event_matches_msg(&global_events[0].clone().into(), "world"); + assert_event_matches_msg(&global_events[1].clone().into(), "hello"); + assert_event_matches_msg(&global_events[2].clone().into(), "oh well"); + assert_eq!(global_events.len(), 3); + + // And next time I'll open the room, I'll get the events in the right order. + let (events, _receiver) = room_event_cache.subscribe().await.unwrap(); + + assert_event_matches_msg(&events[0], "oh well"); + assert_event_matches_msg(&events[1], "hello"); + assert_event_matches_msg(&events[2], "world"); + assert_event_matches_msg(&events[3], "heyo"); + assert_eq!(events.len(), 4); + + assert!(room_stream.is_empty()); +} + +#[async_test] +async fn test_reset_while_backpaginating() { + let (client, server) = logged_in_client_with_server().await; + + let event_cache = client.event_cache(); + + // Immediately subscribe the event cache to sync updates. + event_cache.subscribe().unwrap(); + + // If I sync and get informed I've joined The Room, and get a previous batch + // token, + let room_id = room_id!("!omelette:fromage.fr"); + + let event_builder = EventBuilder::new(); + let mut sync_builder = SyncResponseBuilder::new(); + + { + sync_builder.add_joined_room( + JoinedRoomBuilder::new(room_id) + // Note to self: a timeline must have at least single event to be properly + // serialized. + .add_timeline_event(event_builder.make_sync_message_event( + user_id!("@a:b.c"), + RoomMessageEventContent::text_plain("heyo"), + )) + .set_timeline_prev_batch("first_backpagination".to_owned()), + ); + let response_body = sync_builder.build_json_sync_response(); + + mock_sync(&server, response_body, None).await; + client.sync_once(Default::default()).await.unwrap(); + server.reset().await; + } + + let (room_event_cache, _drop_handles) = + client.get_room(room_id).unwrap().event_cache().await.unwrap(); + + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); + + // This is racy: either the initial message has been processed by the event + // cache (and no room updates will happen in this case), or it hasn't, and + // the stream will return the next message soon. 
+ if events.is_empty() { + let _ = room_stream.recv().await.expect("read error"); + } else { + assert_eq!(events.len(), 1); + } + + // We're going to cause a small race: + // - a background request to sync will be sent, + // - a backpagination will be sent concurrently. + // + // So events have to happen in this order: + // - the backpagination request is sent, with a prev-batch A + // - the sync endpoint returns *after* the backpagination started, before the + // backpagination ends + // - the backpagination ends, with a prev-batch token that's now stale. + // + // The backpagination should result in an unknown-token-error. + + sync_builder.add_joined_room( + JoinedRoomBuilder::new(room_id) + // Note to self: a timeline must have at least single event to be properly + // serialized. + .add_timeline_event(event_builder.make_sync_message_event( + user_id!("@a:b.c"), + RoomMessageEventContent::text_plain("heyo"), + )) + .set_timeline_prev_batch("second_backpagination".to_owned()) + .set_timeline_limited(), + ); + let sync_response_body = sync_builder.build_json_sync_response(); + + // First back-pagination request: + let chunk = non_sync_events!(event_builder, [ (room_id, "$2": "lalala") ]); + let response_json = json!({ + "chunk": chunk, + "start": "t392-516_47314_0_7_1_1_1_11444_1", + }); + Mock::given(method("GET")) + .and(path_regex(r"^/_matrix/client/r0/rooms/.*/messages$")) + .and(header("authorization", "Bearer 1234")) + .and(query_param("from", "first_backpagination")) + .respond_with( + ResponseTemplate::new(200) + .set_body_json(response_json.clone()) + .set_delay(Duration::from_millis(500)), /* This is why we don't use + * `mock_messages`. */ + ) + .expect(1) + .mount(&server) + .await; + + let first_token = + room_event_cache.oldest_backpagination_token(Some(Duration::from_secs(1))).await.unwrap(); + assert!(first_token.is_some()); + + let rec = room_event_cache.clone(); + let first_token_clone = first_token.clone(); + let backpagination = + spawn(async move { rec.backpaginate_with_token(20, first_token_clone).await }); + + // Receive the sync response (which clears the timeline). + mock_sync(&server, sync_response_body, None).await; + client.sync_once(Default::default()).await.unwrap(); + + let outcome = backpagination.await.expect("join failed").unwrap(); + + // Backpagination should be confused, and the operation should result in an + // unknown token. + assert_matches!(outcome, BackPaginationOutcome::UnknownBackpaginationToken); + + // Now if we retrieve the earliest token, it's not the one we had before. + let second_token = room_event_cache.oldest_backpagination_token(None).await.unwrap().unwrap(); + assert!(first_token.unwrap() != second_token); + assert_eq!(second_token.0, "second_backpagination"); +} + +#[async_test] +async fn test_backpaginating_without_token() { + let (client, server) = logged_in_client_with_server().await; + + let event_cache = client.event_cache(); + + // Immediately subscribe the event cache to sync updates. 
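The `UnknownBackpaginationToken` outcome asserted above exists precisely for the situation that test constructs: a gappy sync resets the room while a back-pagination with the old token is in flight. A hypothetical caller-side loop, assuming `RoomEventCache` is the public type returned by `Room::event_cache()` and that these calls surface `EventCacheError` on failure, could simply refresh the token and retry:

use std::time::Duration;

use matrix_sdk::event_cache::{BackPaginationOutcome, EventCacheError, RoomEventCache};

// Hypothetical caller-side loop: if a gappy sync invalidated the token we
// started from, fetch the current oldest token and try again instead of
// treating it as an error.
async fn backpaginate_until_start(
    room_event_cache: &RoomEventCache,
) -> Result<(), EventCacheError> {
    loop {
        let token = room_event_cache
            .oldest_backpagination_token(Some(Duration::from_secs(1)))
            .await?;

        match room_event_cache.backpaginate_with_token(20, token).await? {
            BackPaginationOutcome::Success { reached_start, events: _ } => {
                if reached_start {
                    return Ok(());
                }
            }
            // The timeline was reset while we were paginating; loop and pick
            // up the fresh token on the next iteration.
            BackPaginationOutcome::UnknownBackpaginationToken => continue,
        }
    }
}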
+ event_cache.subscribe().unwrap(); + + // If I sync and get informed I've joined The Room, without a previous batch + // token, + let room_id = room_id!("!omelette:fromage.fr"); + + let event_builder = EventBuilder::new(); + let mut sync_builder = SyncResponseBuilder::new(); + + { + sync_builder.add_joined_room(JoinedRoomBuilder::new(room_id)); + let response_body = sync_builder.build_json_sync_response(); + + mock_sync(&server, response_body, None).await; + client.sync_once(Default::default()).await.unwrap(); + server.reset().await; + } + + let (room_event_cache, _drop_handles) = + client.get_room(room_id).unwrap().event_cache().await.unwrap(); + + let (events, room_stream) = room_event_cache.subscribe().await.unwrap(); + + assert!(events.is_empty()); + assert!(room_stream.is_empty()); + + Mock::given(method("GET")) + .and(path_regex(r"^/_matrix/client/r0/rooms/.*/messages$")) + .and(header("authorization", "Bearer 1234")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "chunk": non_sync_events!(event_builder, [(room_id, "$2": "hi")]), + "start": "t392-516_47314_0_7_1_1_1_11444_1", + }))) + .expect(1) + .mount(&server) + .await; + + // We don't have a token. + let token = + room_event_cache.oldest_backpagination_token(Some(Duration::from_secs(1))).await.unwrap(); + assert!(token.is_none()); + + // If we try to back-paginate with a token, it will hit the end of the timeline + // and give us the resulting event. + let outcome = room_event_cache.backpaginate_with_token(20, token).await.unwrap(); + assert_let!(BackPaginationOutcome::Success { events, reached_start } = outcome); + + assert!(reached_start); + + // And we get notified about the new event. + assert_event_matches_msg(&events[0].clone().into(), "hi"); + assert_eq!(events.len(), 1); + + assert!(room_stream.is_empty()); +} diff --git a/crates/matrix-sdk/tests/integration/main.rs b/crates/matrix-sdk/tests/integration/main.rs index e04e54f3ba9..a5fd1eeac28 100644 --- a/crates/matrix-sdk/tests/integration/main.rs +++ b/crates/matrix-sdk/tests/integration/main.rs @@ -1,23 +1,18 @@ // The http mocking library is not supported for wasm32 #![cfg(not(target_arch = "wasm32"))] -use matrix_sdk::{ - config::{RequestConfig, SyncSettings}, - matrix_auth::{MatrixSession, MatrixSessionTokens}, - Client, ClientBuilder, -}; -use matrix_sdk_base::SessionMeta; +use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server, Client}; use matrix_sdk_test::test_json; -use ruma::{api::MatrixVersion, device_id, user_id}; use serde::Serialize; use wiremock::{ matchers::{header, method, path, path_regex, query_param, query_param_is_missing}, - Mock, MockServer, ResponseTemplate, + Mock, MockGuard, MockServer, ResponseTemplate, }; mod client; #[cfg(feature = "e2e-encryption")] mod encryption; +mod event_cache; mod matrix_auth; mod notification; mod refresh_token; @@ -27,36 +22,8 @@ mod widget; matrix_sdk_test::init_tracing_for_tests!(); -async fn test_client_builder() -> (ClientBuilder, MockServer) { - let server = MockServer::start().await; - let builder = - Client::builder().homeserver_url(server.uri()).server_versions([MatrixVersion::V1_0]); - (builder, server) -} - -async fn no_retry_test_client() -> (Client, MockServer) { - let (builder, server) = test_client_builder().await; - let client = - builder.request_config(RequestConfig::new().disable_retry()).build().await.unwrap(); - (client, server) -} - -async fn logged_in_client() -> (Client, MockServer) { - let session = MatrixSession { - meta: SessionMeta { - user_id: 
user_id!("@example:localhost").to_owned(), - device_id: device_id!("DEVICEID").to_owned(), - }, - tokens: MatrixSessionTokens { access_token: "1234".to_owned(), refresh_token: None }, - }; - let (client, server) = no_retry_test_client().await; - client.restore_session(session).await.unwrap(); - - (client, server) -} - async fn synced_client() -> (Client, MockServer) { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::SYNC, None).await; let sync_settings = SyncSettings::new(); @@ -86,6 +53,28 @@ async fn mock_sync(server: &MockServer, response_body: impl Serialize, since: Op .await; } +/// Mount a Mock on the given server to handle the `GET /sync` endpoint with +/// an optional `since` param that returns a 200 status code with the given +/// response body. +async fn mock_sync_scoped( + server: &MockServer, + response_body: impl Serialize, + since: Option, +) -> MockGuard { + let mut builder = Mock::given(method("GET")).and(path("/_matrix/client/r0/sync")); + + if let Some(since) = since { + builder = builder.and(query_param("since", since)); + } else { + builder = builder.and(query_param_is_missing("since")); + } + + builder + .respond_with(ResponseTemplate::new(200).set_body_json(response_body)) + .mount_as_scoped(server) + .await +} + /// Mount a Mock on the given server to handle the `GET /// /rooms/.../state/m.room.encryption` endpoint with an option whether it /// should return an encryption event or not. diff --git a/crates/matrix-sdk/tests/integration/matrix_auth.rs b/crates/matrix-sdk/tests/integration/matrix_auth.rs index 8917fcbf129..acee16bf2ca 100644 --- a/crates/matrix-sdk/tests/integration/matrix_auth.rs +++ b/crates/matrix-sdk/tests/integration/matrix_auth.rs @@ -4,6 +4,7 @@ use assert_matches::assert_matches; use matrix_sdk::{ config::RequestConfig, matrix_auth::{MatrixSession, MatrixSessionTokens}, + test_utils::{logged_in_client_with_server, no_retry_test_client_with_server}, AuthApi, AuthSession, Client, RumaApiError, }; use matrix_sdk_base::SessionMeta; @@ -31,11 +32,9 @@ use wiremock::{ Mock, MockServer, Request, ResponseTemplate, }; -use crate::{logged_in_client, no_retry_test_client, test_client_builder}; - #[async_test] async fn test_restore_session() { - let (client, _) = logged_in_client().await; + let (client, _) = logged_in_client_with_server().await; let auth = client.matrix_auth(); assert!(auth.logged_in(), "Client should be logged in with the MatrixAuth API"); @@ -46,7 +45,7 @@ async fn test_restore_session() { #[async_test] async fn test_login() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; let homeserver = Url::parse(&server.uri()).unwrap(); Mock::given(method("GET")) @@ -85,7 +84,7 @@ async fn test_login() { #[async_test] async fn test_login_with_discovery() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/login")) @@ -103,7 +102,7 @@ async fn test_login_with_discovery() { #[async_test] async fn test_login_no_discovery() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/login")) @@ -122,7 +121,7 @@ async fn test_login_no_discovery() { #[async_test] #[cfg(feature = "sso-login")] async fn test_login_with_sso() { - let (client, 
server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/login")) @@ -159,7 +158,7 @@ async fn test_login_with_sso() { #[async_test] async fn test_login_with_sso_token() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("GET")) .and(path("/_matrix/client/r0/login")) @@ -194,7 +193,7 @@ async fn test_login_with_sso_token() { #[async_test] async fn test_login_error() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/login")) @@ -225,7 +224,7 @@ async fn test_login_error() { #[async_test] async fn test_register_error() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/register")) @@ -595,7 +594,7 @@ async fn test_login_doesnt_fail_if_cross_signing_bootstrapping_failed() { async fn test_login_with_cross_signing_bootstrapping_already_bootstrapped() { // Even if we enabled cross-signing bootstrap for another device, it won't // restart the procedure. - let (builder, server) = test_client_builder().await; + let (builder, server) = matrix_sdk::test_utils::test_client_builder_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/login")) diff --git a/crates/matrix-sdk/tests/integration/notification.rs b/crates/matrix-sdk/tests/integration/notification.rs index 8efab4cd9f4..1f4cf52d60d 100644 --- a/crates/matrix-sdk/tests/integration/notification.rs +++ b/crates/matrix-sdk/tests/integration/notification.rs @@ -15,11 +15,11 @@ use stream_assert::{assert_pending, assert_ready}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; -use crate::{logged_in_client, mock_sync}; +use crate::{logged_in_client_with_server, mock_sync}; #[async_test] async fn notifications_joined() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let room_id = room_id!("!joined_room:localhost"); let user_id = client.user_id().unwrap(); @@ -103,7 +103,7 @@ async fn notifications_joined() { #[async_test] async fn notifications_invite() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let room_id = room_id!("!invited_room:localhost"); let user_id = client.user_id().unwrap(); diff --git a/crates/matrix-sdk/tests/integration/refresh_token.rs b/crates/matrix-sdk/tests/integration/refresh_token.rs index 55b50ade33b..567809cf702 100644 --- a/crates/matrix-sdk/tests/integration/refresh_token.rs +++ b/crates/matrix-sdk/tests/integration/refresh_token.rs @@ -10,6 +10,10 @@ use matrix_sdk::{ config::RequestConfig, executor::spawn, matrix_auth::{MatrixSession, MatrixSessionTokens}, + test_utils::{ + logged_in_client_with_server, no_retry_test_client_with_server, + test_client_builder_with_server, + }, HttpError, RefreshTokenError, SessionChange, }; use matrix_sdk_base::SessionMeta; @@ -28,8 +32,6 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, no_retry_test_client, test_client_builder}; - fn session() -> MatrixSession { MatrixSession { meta: SessionMeta { @@ -45,7 +47,7 @@ fn session() -> MatrixSession { #[async_test] async fn test_login_username_refresh_token() { - let (client, server) 
= no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/login")) @@ -74,7 +76,7 @@ async fn test_login_username_refresh_token() { #[async_test] #[cfg(feature = "sso-login")] async fn login_sso_refresh_token() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/login")) @@ -118,7 +120,7 @@ async fn login_sso_refresh_token() { #[async_test] async fn register_refresh_token() { - let (client, server) = no_retry_test_client().await; + let (client, server) = no_retry_test_client_with_server().await; Mock::given(method("POST")) .and(path("/_matrix/client/r0/register")) @@ -147,7 +149,7 @@ async fn register_refresh_token() { #[async_test] async fn no_refresh_token() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; // Refresh token doesn't change. Mock::given(method("POST")) @@ -163,7 +165,7 @@ async fn no_refresh_token() { #[async_test] async fn test_refresh_token() { - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder .request_config(RequestConfig::new().disable_retry()) .server_versions([MatrixVersion::V1_3]) @@ -240,7 +242,7 @@ async fn test_refresh_token() { #[async_test] async fn refresh_token_not_handled() { - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder .request_config(RequestConfig::new().disable_retry()) .server_versions([MatrixVersion::V1_3]) @@ -274,7 +276,7 @@ async fn refresh_token_not_handled() { #[async_test] async fn refresh_token_handled_success() { - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder .request_config(RequestConfig::new().disable_retry()) .server_versions([MatrixVersion::V1_3]) @@ -334,7 +336,7 @@ async fn refresh_token_handled_success() { #[async_test] async fn refresh_token_handled_failure() { - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder .request_config(RequestConfig::new().disable_retry()) .server_versions([MatrixVersion::V1_3]) @@ -384,7 +386,7 @@ async fn refresh_token_handled_failure() { #[async_test] async fn refresh_token_handled_multi_success() { - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder .request_config(RequestConfig::new().disable_retry()) .server_versions([MatrixVersion::V1_3]) @@ -457,7 +459,7 @@ async fn refresh_token_handled_multi_success() { #[async_test] async fn refresh_token_handled_multi_failure() { - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder .request_config(RequestConfig::new().disable_retry()) .server_versions([MatrixVersion::V1_3]) @@ -530,7 +532,7 @@ async fn refresh_token_handled_multi_failure() { #[async_test] async fn refresh_token_handled_other_error() { - let (builder, server) = test_client_builder().await; + let (builder, server) = test_client_builder_with_server().await; let client = builder .request_config(RequestConfig::new().disable_retry()) 
.server_versions([MatrixVersion::V1_3]) diff --git a/crates/matrix-sdk/tests/integration/room/common.rs b/crates/matrix-sdk/tests/integration/room/common.rs index e9480487e49..366975458d7 100644 --- a/crates/matrix-sdk/tests/integration/room/common.rs +++ b/crates/matrix-sdk/tests/integration/room/common.rs @@ -20,11 +20,11 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_sync}; +use crate::{logged_in_client_with_server, mock_sync}; #[async_test] async fn user_presence() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::SYNC, None).await; @@ -48,7 +48,7 @@ async fn user_presence() { #[async_test] async fn calculate_room_names_from_summary() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::DEFAULT_SYNC_SUMMARY, None).await; @@ -61,7 +61,7 @@ async fn calculate_room_names_from_summary() { #[async_test] async fn room_names() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::SYNC, None).await; @@ -89,7 +89,7 @@ async fn room_names() { #[async_test] async fn test_state_event_getting() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync = json!({ "next_batch": "1234", @@ -177,7 +177,7 @@ async fn test_state_event_getting() { #[async_test] async fn room_route() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let mut ev_builder = SyncResponseBuilder::new(); let room_id = &*DEFAULT_TEST_ROOM_ID; @@ -349,7 +349,7 @@ async fn room_route() { #[async_test] async fn room_permalink() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let mut ev_builder = SyncResponseBuilder::new(); let room_id = room_id!("!test_room:127.0.0.1"); @@ -438,7 +438,7 @@ async fn room_permalink() { #[async_test] async fn room_event_permalink() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let mut ev_builder = SyncResponseBuilder::new(); let room_id = room_id!("!test_room:127.0.0.1"); let event_id = event_id!("$15139375512JaHAW"); @@ -503,7 +503,7 @@ async fn room_event_permalink() { async fn event() { let event_id = event_id!("$foun39djjod0f"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut ev_builder = SyncResponseBuilder::new(); diff --git a/crates/matrix-sdk/tests/integration/room/joined.rs b/crates/matrix-sdk/tests/integration/room/joined.rs index 3cafa0ebfef..442ee5b67f1 100644 --- a/crates/matrix-sdk/tests/integration/room/joined.rs +++ b/crates/matrix-sdk/tests/integration/room/joined.rs @@ -10,17 +10,17 @@ use matrix_sdk::{ Thumbnail, }, config::SyncSettings, - room::{Receipts, ReportedContentScore}, + room::{Receipts, ReportedContentScore, RoomMemberRole}, }; use matrix_sdk_base::RoomState; use matrix_sdk_test::{ - async_test, test_json, EphemeralTestEvent, JoinedRoomBuilder, SyncResponseBuilder, - DEFAULT_TEST_ROOM_ID, + async_test, test_json, test_json::sync::CUSTOM_ROOM_POWER_LEVELS, EphemeralTestEvent, + JoinedRoomBuilder, SyncResponseBuilder, DEFAULT_TEST_ROOM_ID, }; use 
ruma::{ api::client::{membership::Invite3pidInit, receipt::create_receipt::v3::ReceiptType}, assign, event_id, - events::{receipt::ReceiptThread, room::message::RoomMessageEventContent}, + events::{receipt::ReceiptThread, room::message::RoomMessageEventContent, TimelineEventType}, int, mxc_uri, owned_event_id, room_id, thirdparty, uint, user_id, OwnedUserId, TransactionId, }; use serde_json::json; @@ -29,11 +29,11 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_encryption_state, mock_sync, synced_client}; +use crate::{logged_in_client_with_server, mock_encryption_state, mock_sync, synced_client}; #[async_test] async fn invite_user_by_id() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/invite$")) @@ -56,7 +56,7 @@ async fn invite_user_by_id() { #[async_test] async fn invite_user_by_3pid() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/invite$")) @@ -88,7 +88,7 @@ async fn invite_user_by_3pid() { #[async_test] async fn leave_room() -> Result<(), anyhow::Error> { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/leave$")) @@ -114,7 +114,7 @@ async fn leave_room() -> Result<(), anyhow::Error> { #[async_test] async fn ban_user() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/ban$")) @@ -137,7 +137,7 @@ async fn ban_user() { #[async_test] async fn unban_user() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/unban$")) @@ -160,7 +160,7 @@ async fn unban_user() { #[async_test] async fn test_mark_as_unread() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("PUT")) .and(path_regex( @@ -186,7 +186,7 @@ async fn test_mark_as_unread() { #[async_test] async fn kick_user() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/kick$")) @@ -209,7 +209,7 @@ async fn kick_user() { #[async_test] async fn send_single_receipt() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/receipt")) @@ -232,7 +232,7 @@ async fn send_single_receipt() { #[async_test] async fn send_multiple_receipts() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/read_markers$")) @@ -256,7 +256,7 @@ async fn send_multiple_receipts() { #[async_test] async fn typing_notice() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("PUT")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/typing")) @@ -280,7 +280,7 @@ async fn 
typing_notice() { async fn room_state_event_send() { use ruma::events::room::member::{MembershipState, RoomMemberEventContent}; - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("PUT")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/state/.*")) @@ -308,7 +308,7 @@ async fn room_state_event_send() { #[async_test] async fn room_message_send() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("PUT")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/send/.*")) @@ -335,7 +335,7 @@ async fn room_message_send() { #[async_test] async fn room_attachment_send() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("PUT")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/send/.*")) @@ -383,7 +383,7 @@ async fn room_attachment_send() { #[async_test] async fn room_attachment_send_info() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("PUT")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/send/.*")) @@ -435,7 +435,7 @@ async fn room_attachment_send_info() { #[async_test] async fn room_attachment_send_wrong_info() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("PUT")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/send/.*")) @@ -486,7 +486,7 @@ async fn room_attachment_send_wrong_info() { #[async_test] async fn room_attachment_send_info_thumbnail() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("PUT")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/send/.*")) @@ -644,7 +644,7 @@ async fn set_name() { #[async_test] async fn report_content() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let reason = "I am offended"; let score = int!(-80); @@ -677,7 +677,7 @@ async fn report_content() { #[async_test] async fn subscribe_to_typing_notifications() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let typing_sequences: Arc>>> = Arc::new(Mutex::new(Vec::new())); // The expected typing sequences that we will receive, note that the current // user_id is filtered out. 
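Every test in this file leans on the same wiremock recipe: match the Matrix route the SDK is expected to call, require the logged-in client's `authorization` header, stub a `200` JSON response, and let `.expect(1)` verify the call count when the mock server is dropped at the end of the test. A minimal, self-contained sketch of that recipe follows; the test name, the example room/user IDs, and the use of `reqwest` (with its `json` feature) as the caller are illustrative assumptions rather than part of this patch:

```rust
use serde_json::json;
use wiremock::{
    matchers::{header, method, path_regex},
    Mock, MockServer, ResponseTemplate,
};

#[tokio::test]
async fn kick_endpoint_is_called_exactly_once() {
    // Spin up an in-process HTTP server; the SDK tests point the client at `server.uri()`.
    let server = MockServer::start().await;

    Mock::given(method("POST"))
        .and(path_regex(r"^/_matrix/client/r0/rooms/.*/kick$"))
        .and(header("authorization", "Bearer 1234"))
        .respond_with(ResponseTemplate::new(200).set_body_json(json!({})))
        // Fail the test on drop unless this route was hit exactly once.
        .expect(1)
        .mount(&server)
        .await;

    // Any HTTP client works for the sketch; this plain reqwest call stands in
    // for a `room.kick_user(..)` issued through a real `matrix_sdk::Client`.
    reqwest::Client::new()
        .post(format!("{}/_matrix/client/r0/rooms/!room:example.org/kick", server.uri()))
        .header("authorization", "Bearer 1234")
        .json(&json!({ "user_id": "@user:example.org", "reason": "spam" }))
        .send()
        .await
        .unwrap();
}
```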
@@ -748,3 +748,107 @@ async fn subscribe_to_typing_notifications() { join_handle.await.unwrap(); assert_eq!(typing_sequences.lock().unwrap().to_vec(), asserted_typing_sequences); } + +#[async_test] +async fn get_suggested_user_role() { + let (client, server) = logged_in_client_with_server().await; + + mock_sync(&server, &*test_json::DEFAULT_SYNC_SUMMARY, None).await; + + let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); + let _response = client.sync_once(sync_settings).await.unwrap(); + let room = client.get_room(&DEFAULT_TEST_ROOM_ID).unwrap(); + + let role_admin = room.get_suggested_user_role(user_id!("@example:localhost")).await.unwrap(); + assert_eq!(role_admin, RoomMemberRole::Administrator); + + // This user either does not exist in the room or has no special role + let role_unknown = + room.get_suggested_user_role(user_id!("@non-existing:localhost")).await.unwrap(); + assert_eq!(role_unknown, RoomMemberRole::User); +} + +#[async_test] +async fn get_power_level_for_user() { + let (client, server) = logged_in_client_with_server().await; + + mock_sync(&server, &*test_json::DEFAULT_SYNC_SUMMARY, None).await; + + let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); + let _response = client.sync_once(sync_settings).await.unwrap(); + let room = client.get_room(&DEFAULT_TEST_ROOM_ID).unwrap(); + + let power_level_admin = + room.get_user_power_level(user_id!("@example:localhost")).await.unwrap(); + assert_eq!(power_level_admin, 100); + + // This user either does not exist in the room or has no special power level + let power_level_unknown = + room.get_user_power_level(user_id!("@non-existing:localhost")).await.unwrap(); + assert_eq!(power_level_unknown, 0); +} + +#[async_test] +async fn get_users_with_power_levels() { + let (client, server) = logged_in_client_with_server().await; + + mock_sync(&server, &*test_json::sync::SYNC_ADMIN_AND_MOD, None).await; + + let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); + let _response = client.sync_once(sync_settings).await.unwrap(); + let room = client.get_room(&DEFAULT_TEST_ROOM_ID).unwrap(); + + let users_with_power_levels = room.users_with_power_levels().await; + assert_eq!(users_with_power_levels.len(), 2); + assert_eq!(users_with_power_levels[user_id!("@admin:localhost")], 100); + assert_eq!(users_with_power_levels[user_id!("@mod:localhost")], 50); +} + +#[async_test] +async fn get_users_with_power_levels_is_empty_if_power_level_info_is_not_available() { + let (client, server) = logged_in_client_with_server().await; + + mock_sync(&server, &*test_json::INVITE_SYNC, None).await; + + let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); + let _response = client.sync_once(sync_settings).await.unwrap(); + // The room doesn't have any power level info + let room = client.get_room(room_id!("!696r7674:example.com")).unwrap(); + + assert!(room.users_with_power_levels().await.is_empty()); +} + +#[async_test] +async fn reset_power_levels() { + let (client, server) = logged_in_client_with_server().await; + + mock_sync(&server, &*CUSTOM_ROOM_POWER_LEVELS, None).await; + + let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); + let _response = client.sync_once(sync_settings).await.unwrap(); + let room = client.get_room(&DEFAULT_TEST_ROOM_ID).unwrap(); + + Mock::given(method("PUT")) + .and(path_regex(r"^/_matrix/client/r0/rooms/.*/state/m.room.power_levels/$")) + .and(header("authorization", "Bearer 1234")) + 
.and(body_partial_json(json!({ + "events": { + // 'm.room.avatar' is 100 here, if we receive a value '50', the reset worked + "m.room.avatar": 50, + "m.room.canonical_alias": 50, + "m.room.history_visibility": 100, + "m.room.name": 50, + "m.room.power_levels": 100, + "m.room.topic": 50 + }, + }))) + .respond_with(ResponseTemplate::new(200).set_body_json(&*test_json::EVENT_ID)) + .expect(1) + .mount(&server) + .await; + + let initial_power_levels = room.room_power_levels().await.unwrap(); + assert_eq!(initial_power_levels.events[&TimelineEventType::RoomAvatar], int!(100)); + + room.reset_power_levels().await.unwrap(); +} diff --git a/crates/matrix-sdk/tests/integration/room/left.rs b/crates/matrix-sdk/tests/integration/room/left.rs index d17a05bc6d9..6b2ccc26bcb 100644 --- a/crates/matrix-sdk/tests/integration/room/left.rs +++ b/crates/matrix-sdk/tests/integration/room/left.rs @@ -9,11 +9,11 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_sync}; +use crate::{logged_in_client_with_server, mock_sync}; #[async_test] async fn forget_room() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/forget$")) @@ -35,7 +35,7 @@ async fn forget_room() { #[async_test] async fn rejoin_room() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; Mock::given(method("POST")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/join")) diff --git a/crates/matrix-sdk/tests/integration/room/notification_mode.rs b/crates/matrix-sdk/tests/integration/room/notification_mode.rs index bca63e6c21e..8f36a6f35e5 100644 --- a/crates/matrix-sdk/tests/integration/room/notification_mode.rs +++ b/crates/matrix-sdk/tests/integration/room/notification_mode.rs @@ -14,13 +14,13 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_sync}; +use crate::{logged_in_client_with_server, mock_sync}; #[async_test] async fn get_notification_mode() { let room_no_rules_id = room_id!("!jEsUZKDJdhlrceRyVU:localhost"); let room_not_joined_id = room_id!("!aBfUOMDJhmtucfVzGa:localhost"); - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); diff --git a/crates/matrix-sdk/tests/integration/room/spaces.rs b/crates/matrix-sdk/tests/integration/room/spaces.rs index 6d1a190dafd..039f3fdfc9d 100644 --- a/crates/matrix-sdk/tests/integration/room/spaces.rs +++ b/crates/matrix-sdk/tests/integration/room/spaces.rs @@ -12,7 +12,7 @@ use wiremock::{ Mock, ResponseTemplate, }; -use crate::{logged_in_client, mock_sync, MockServer}; +use crate::{logged_in_client_with_server, mock_sync, MockServer}; pub static DEFAULT_TEST_SPACE_ID: Lazy<&RoomId> = Lazy::new(|| room_id!("!hIMjEx205EXNyjVPCV:localhost")); @@ -153,7 +153,7 @@ async fn sync_space( #[async_test] async fn no_parent_space() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_sync(&server, &*test_json::SYNC, None).await; @@ -170,7 +170,7 @@ async fn no_parent_space() { #[async_test] async fn parent_space_undeserializable() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let mut sync = PARENT_SPACE_SYNC.clone(); 
sync["rooms"]["join"][DEFAULT_TEST_ROOM_ID.as_str()]["timeline"]["events"][0]["content"] @@ -186,7 +186,7 @@ async fn parent_space_undeserializable() { #[async_test] async fn parent_space_redacted() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let mut sync = PARENT_SPACE_SYNC.clone(); let timeline = &mut sync["rooms"]["join"][DEFAULT_TEST_ROOM_ID.as_str()]["timeline"]["events"]; @@ -215,7 +215,7 @@ async fn parent_space_redacted() { #[async_test] async fn parent_space_unverifiable() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; initial_sync_with_m_space_parent(&client, &server, &PARENT_SPACE_SYNC).await; @@ -230,7 +230,7 @@ async fn parent_space_unverifiable() { #[async_test] async fn parent_space_illegitimate() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_members(&server).await; @@ -249,7 +249,7 @@ async fn parent_space_illegitimate() { #[async_test] async fn parent_space_reciprocal() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_token = initial_sync_with_m_space_parent(&client, &server, &PARENT_SPACE_SYNC).await; @@ -286,7 +286,7 @@ async fn parent_space_reciprocal() { #[async_test] async fn parent_space_redacted_reciprocal() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; mock_members(&server).await; @@ -374,7 +374,7 @@ async fn setup_parent_member( #[async_test] async fn parent_space_powerlevel() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_token = initial_sync_with_m_space_parent(&client, &server, &PARENT_SPACE_SYNC).await; @@ -391,7 +391,7 @@ async fn parent_space_powerlevel() { #[async_test] async fn parent_space_powerlevel_too_low() { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; let sync_token = initial_sync_with_m_space_parent(&client, &server, &PARENT_SPACE_SYNC).await; diff --git a/crates/matrix-sdk/tests/integration/room/tags.rs b/crates/matrix-sdk/tests/integration/room/tags.rs index 994102329ec..43b964d4ab5 100644 --- a/crates/matrix-sdk/tests/integration/room/tags.rs +++ b/crates/matrix-sdk/tests/integration/room/tags.rs @@ -14,7 +14,7 @@ use wiremock::{ Mock, MockServer, ResponseTemplate, }; -use crate::{logged_in_client, mock_sync}; +use crate::{logged_in_client_with_server, mock_sync}; enum TagOperation { Set, @@ -70,7 +70,7 @@ async fn synced_client_with_room( ev_builder: &mut SyncResponseBuilder, room_id: &RoomId, ) -> (Client, Room, MockServer) { - let (client, server) = logged_in_client().await; + let (client, server) = logged_in_client_with_server().await; ev_builder.add_joined_room(JoinedRoomBuilder::new(room_id)); mock_sync(&server, ev_builder.build_json_sync_response(), None).await; diff --git a/crates/matrix-sdk/tests/integration/widget.rs b/crates/matrix-sdk/tests/integration/widget.rs index e21eeca7e75..9ff2387d28f 100644 --- a/crates/matrix-sdk/tests/integration/widget.rs +++ b/crates/matrix-sdk/tests/integration/widget.rs @@ -48,7 +48,7 @@ use wiremock::{ Mock, MockServer, ResponseTemplate, }; -use crate::{logged_in_client, mock_encryption_state, mock_sync}; +use crate::{logged_in_client_with_server, mock_encryption_state, mock_sync}; /// Create a JSON 
string from a [`json!`][serde_json::json] "literal". #[macro_export] @@ -70,7 +70,7 @@ async fn run_test_driver(init_on_content_load: bool) -> (Client, MockServer, Wid } } - let (client, mock_server) = logged_in_client().await; + let (client, mock_server) = logged_in_client_with_server().await; let sync_settings = SyncSettings::new().timeout(Duration::from_millis(3000)); let mut sync_builder = SyncResponseBuilder::new(); diff --git a/examples/getting_started/src/main.rs b/examples/getting_started/src/main.rs index 47efa03441d..2e6d9438741 100644 --- a/examples/getting_started/src/main.rs +++ b/examples/getting_started/src/main.rs @@ -40,7 +40,7 @@ async fn main() -> anyhow::Result<()> { "Usage: {} ", env::args().next().unwrap() ); - // exist if missing + // exit if missing exit(1) } }; @@ -143,8 +143,8 @@ async fn on_stripped_state_member( // This fn is called whenever we see a new room message event. You notice that // the difference between this and the other function that we've given to the // handler lies only in their input parameters. However, that is enough for the -// rust-sdk to figure out which one to call one and only do so, when -// the parameters are available. +// rust-sdk to figure out which one to call and only do so, when the parameters +// are available. async fn on_room_message(event: OriginalSyncRoomMessageEvent, room: Room) { // First, we need to unpack the message: We only want messages from rooms we are // still in and that are regular text messages - ignoring everything else. diff --git a/labs/README.md b/labs/README.md index 47e0a70471e..d88110b99e5 100644 --- a/labs/README.md +++ b/labs/README.md @@ -13,8 +13,8 @@ Rust SDK can evolve, feel free to propose an experiment. ## Current experiments -- rrrepl: a *R*ead *R*eceipts REPL, to help with client-side computation of read-receipts. Useful - for debugging. +- multiverse: a TUI client mostly for quick development iteration of SDK features and debugging. + Run with `cargo run --bin multiverse matrix.org ~/.cache/multiverse-cache`. ## Archived experiments diff --git a/labs/rrrepl/Cargo.toml b/labs/multiverse/Cargo.toml similarity index 75% rename from labs/rrrepl/Cargo.toml rename to labs/multiverse/Cargo.toml index 1e6ee419f99..308c563d55f 100644 --- a/labs/rrrepl/Cargo.toml +++ b/labs/multiverse/Cargo.toml @@ -1,23 +1,26 @@ [package] -name = "rrrepl" +name = "multiverse" version = "0.1.0" edition = "2021" publish = false [[bin]] -name = "rrrepl" +name = "multiverse" test = false [dependencies] anyhow = "1" -tokio = { version = "1.24.2", features = ["macros", "rt-multi-thread"] } -url = "2.2.2" -# when copy-pasting this, please use a git dependency or make sure that you -# have copied the example as it was at the time of the release you use. 
+color-eyre = "0.6.2" +crossterm = "0.27.0" +futures-util = { workspace = true } +imbl = { workspace = true } matrix-sdk = { path = "../../crates/matrix-sdk", features = ["sso-login"] } matrix-sdk-ui = { path = "../../crates/matrix-sdk-ui" } +ratatui = "0.26.1" +rpassword = "7.3.1" +serde_json = { workspace = true } +tokio = { version = "1.24.2", features = ["macros", "rt-multi-thread"] } tracing = { workspace = true } tracing-appender = { version = "0.2.2" } tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -futures-util = { workspace = true } -serde_json = { workspace = true } +url = "2.2.2" diff --git a/labs/multiverse/src/main.rs b/labs/multiverse/src/main.rs new file mode 100644 index 00000000000..e6edcf34d69 --- /dev/null +++ b/labs/multiverse/src/main.rs @@ -0,0 +1,861 @@ +use std::{ + collections::HashMap, + env, + io::{self, stdout, Write}, + path::PathBuf, + process::exit, + sync::{Arc, Mutex}, + time::Duration, +}; + +use color_eyre::config::HookBuilder; +use crossterm::{ + event::{self, Event, KeyCode, KeyEventKind}, + terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, + ExecutableCommand, +}; +use futures_util::{pin_mut, StreamExt as _}; +use imbl::Vector; +use matrix_sdk::{ + config::StoreConfig, + encryption::{BackupDownloadStrategy, EncryptionSettings}, + matrix_auth::MatrixSession, + ruma::{ + api::client::receipt::create_receipt::v3::ReceiptType, events::room::message::MessageType, + OwnedRoomId, RoomId, + }, + AuthSession, Client, RoomListEntry, ServerName, SqliteCryptoStore, SqliteStateStore, +}; +use matrix_sdk_ui::{ + room_list_service, + sync_service::{self, SyncService}, + timeline::{ + PaginationOptions, TimelineItem, TimelineItemContent, TimelineItemKind, VirtualTimelineItem, + }, + Timeline as SdkTimeline, +}; +use ratatui::{prelude::*, style::palette::tailwind, widgets::*}; +use tokio::{spawn, task::JoinHandle}; +use tracing::error; +use tracing_subscriber::{layer::SubscriberExt as _, util::SubscriberInitExt as _, EnvFilter}; + +const HEADER_BG: Color = tailwind::BLUE.c950; +const NORMAL_ROW_COLOR: Color = tailwind::SLATE.c950; +const ALT_ROW_COLOR: Color = tailwind::SLATE.c900; +const SELECTED_STYLE_FG: Color = tailwind::BLUE.c300; +const TEXT_COLOR: Color = tailwind::SLATE.c200; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let file_layer = tracing_subscriber::fmt::layer() + .with_ansi(false) + .with_writer(tracing_appender::rolling::hourly("/tmp/", "logs-")); + + tracing_subscriber::registry() + .with(EnvFilter::new(std::env::var("RUST_LOG").unwrap_or("".into()))) + .with(file_layer) + .init(); + + // Read the server name from the command line. 
+ let Some(server_name) = env::args().nth(1) else { + eprintln!("Usage: {} ", env::args().next().unwrap()); + exit(1) + }; + + let config_path = env::args().nth(2).unwrap_or("/tmp/".to_owned()); + let client = configure_client(server_name, config_path).await?; + + init_error_hooks()?; + let terminal = init_terminal()?; + + let mut app = App::new(client).await?; + + app.run(terminal).await +} + +fn init_error_hooks() -> anyhow::Result<()> { + let (panic, error) = HookBuilder::default().into_hooks(); + let panic = panic.into_panic_hook(); + let error = error.into_eyre_hook(); + color_eyre::eyre::set_hook(Box::new(move |e| { + let _ = restore_terminal(); + error(e) + }))?; + std::panic::set_hook(Box::new(move |info| { + let _ = restore_terminal(); + panic(info) + })); + Ok(()) +} + +fn init_terminal() -> anyhow::Result> { + enable_raw_mode()?; + stdout().execute(EnterAlternateScreen)?; + let backend = CrosstermBackend::new(stdout()); + let terminal = Terminal::new(backend)?; + Ok(terminal) +} + +fn restore_terminal() -> anyhow::Result<()> { + disable_raw_mode()?; + stdout().execute(LeaveAlternateScreen)?; + Ok(()) +} + +#[derive(Default)] +struct StatefulList { + state: ListState, + items: Arc>>, +} + +#[derive(Default, PartialEq)] +enum DetailsMode { + #[default] + ReadReceipts, + TimelineItems, + // Events // TODO: Soon™ +} + +struct Timeline { + timeline: Arc, + items: Arc>>>, + task: JoinHandle<()>, +} + +struct App { + /// Reference to the main SDK client. + client: Client, + + /// The sync service used for synchronizing events. + sync_service: Arc, + + /// Room list service rooms known to the app. + ui_rooms: Arc>>, + + /// Timelines data structures for each room. + timelines: Arc>>, + + /// Ratatui's list of room list entries. + room_list_entries: StatefulList, + + /// Task listening to room list service changes, and spawning timelines. + listen_task: JoinHandle<()>, + + /// Content of the latest status message, if set. + last_status_message: Arc>>, + + /// A task to automatically clear the status message in N seconds, if set. + clear_status_message: Option>, + + /// What's shown in the details view, aka the right panel. + details_mode: DetailsMode, + + /// The current room that's subscribed to in the room list's sliding sync. + current_room_subscription: Option, + + current_pagination: Arc>>>, +} + +impl App { + async fn new(client: Client) -> anyhow::Result { + let sync_service = Arc::new(SyncService::builder(client.clone()).build().await?); + + let room_list_service = sync_service.room_list_service(); + + let all_rooms = room_list_service.all_rooms().await?; + let (rooms, stream) = all_rooms.entries(); + + let rooms = Arc::new(Mutex::new(rooms)); + let ui_rooms: Arc>> = + Default::default(); + let timelines = Arc::new(Mutex::new(HashMap::new())); + + let r = rooms.clone(); + let ur = ui_rooms.clone(); + let s = sync_service.clone(); + let t = timelines.clone(); + + let listen_task = spawn(async move { + pin_mut!(stream); + let rooms = r; + let ui_rooms = ur; + let sync_service = s; + let timelines = t; + + while let Some(diffs) = stream.next().await { + let all_rooms = { + // Apply the diffs to the list of room entries. + let mut rooms = rooms.lock().unwrap(); + for diff in diffs { + diff.apply(&mut rooms); + } + + // Collect rooms early to release the room entries list lock. 
+ rooms + .iter() + .filter_map(|entry| entry.as_room_id().map(ToOwned::to_owned)) + .collect::>() + }; + + // Clone the previous set of ui rooms to avoid keeping the ui_rooms lock (which + // we couldn't do below, because it's a sync lock, and has to be + // sync b/o rendering; and we'd have to cross await points + // below). + let previous_ui_rooms = ui_rooms.lock().unwrap().clone(); + + let mut new_ui_rooms = HashMap::new(); + let mut new_timelines = Vec::new(); + + // Initialize all the new rooms. + for room_id in + all_rooms.into_iter().filter(|room_id| !previous_ui_rooms.contains_key(room_id)) + { + // Retrieve the room list service's Room. + let Ok(ui_room) = sync_service.room_list_service().room(&room_id).await else { + error!("error when retrieving room after an update"); + continue; + }; + + // Initialize the timeline. + let builder = match ui_room.default_room_timeline_builder().await { + Ok(builder) => builder, + Err(err) => { + error!("error when getting default timeline builder: {err}"); + continue; + } + }; + + if let Err(err) = ui_room.init_timeline_with_builder(builder).await { + error!("error when creating default timeline: {err}"); + } + + // Save the timeline in the cache. + let sdk_timeline = ui_room.timeline().unwrap(); + let (items, stream) = sdk_timeline.subscribe().await; + let items = Arc::new(Mutex::new(items)); + + // Spawn a timeline task that will listen to all the timeline item changes. + let i = items.clone(); + let timeline_task = spawn(async move { + pin_mut!(stream); + let items = i; + while let Some(diff) = stream.next().await { + let mut items = items.lock().unwrap(); + diff.apply(&mut items); + } + }); + + new_timelines.push(( + room_id.clone(), + Timeline { timeline: sdk_timeline, items, task: timeline_task }, + )); + + // Save the room list service room in the cache. + new_ui_rooms.insert(room_id, ui_room); + } + + ui_rooms.lock().unwrap().extend(new_ui_rooms); + timelines.lock().unwrap().extend(new_timelines); + } + }); + + // This will sync (with encryption) until an error happens or the program is + // stopped. + sync_service.start().await; + + Ok(Self { + sync_service, + room_list_entries: StatefulList { state: Default::default(), items: rooms }, + client, + listen_task, + last_status_message: Default::default(), + clear_status_message: None, + ui_rooms, + details_mode: Default::default(), + timelines, + current_room_subscription: None, + current_pagination: Default::default(), + }) + } +} + +impl App { + /// Set the current status message (displayed at the bottom), for a few + /// seconds. + fn set_status_message(&mut self, status: String) { + if let Some(handle) = self.clear_status_message.take() { + // Cancel the previous task to clear the status message. + handle.abort(); + } + + *self.last_status_message.lock().unwrap() = Some(status); + + let message = self.last_status_message.clone(); + self.clear_status_message = Some(spawn(async move { + // Clear the status message in 4 seconds. + tokio::time::sleep(Duration::from_secs(4)).await; + + *message.lock().unwrap() = None; + })); + } + + /// Mark the currently selected room as read. + async fn mark_as_read(&mut self) { + let Some(room) = self + .get_selected_room_id(None) + .and_then(|room_id| self.ui_rooms.lock().unwrap().get(&room_id).cloned()) + else { + self.set_status_message("missing room or nothing to show".to_owned()); + return; + }; + + // Mark as read! 
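The comments in the listen task above describe the locking discipline this app relies on: the shared maps sit behind `std::sync::Mutex` because rendering is synchronous, so the task collects the room IDs and clones the previous `ui_rooms` map while the guard is held only briefly, and never carries a guard across an `.await`. A small generic sketch of that pattern, with hypothetical names (`refresh`, `fetch_details`) standing in for the SDK calls:

```rust
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
};

// Hypothetical async step, standing in for the awaited SDK calls in the task.
async fn fetch_details(name: &str) -> String {
    format!("details for {name}")
}

async fn refresh(state: Arc<Mutex<HashMap<String, String>>>) {
    // Copy the keys out while holding the sync lock; the temporary guard is
    // dropped at the end of this statement, before any `.await`.
    let names: Vec<String> = state.lock().unwrap().keys().cloned().collect();

    for name in names {
        // Await with no guard held...
        let details = fetch_details(&name).await;
        // ...then re-take the lock briefly to write the result back.
        state.lock().unwrap().insert(name, details);
    }
}

#[tokio::main]
async fn main() {
    let state =
        Arc::new(Mutex::new(HashMap::from([("!room:example.org".to_owned(), String::new())])));
    refresh(state.clone()).await;
    println!("{:?}", state.lock().unwrap());
}
```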
+ match room.timeline().unwrap().mark_as_read(ReceiptType::Read).await { + Ok(did) => { + self.set_status_message(format!( + "did {}send a read receipt!", + if did { "" } else { "not " } + )); + } + Err(err) => { + self.set_status_message(format!("error when marking a room as read: {err}",)); + } + } + } + + /// Run a small back-pagination (expect a batch of 20 events, continue until + /// we get 10 timeline items or hit the timeline start). + async fn back_paginate(&mut self) { + let Some(sdk_timeline) = self.get_selected_room_id(None).and_then(|room_id| { + self.timelines.lock().unwrap().get(&room_id).map(|timeline| timeline.timeline.clone()) + }) else { + self.set_status_message("missing timeline for room".to_owned()); + return; + }; + + let mut pagination = self.current_pagination.lock().unwrap(); + + // Cancel the previous back-pagination, if any. + if let Some(prev) = pagination.take() { + prev.abort(); + } + + // Start a new one, request batches of 20 events, stop after 10 timeline items + // have been added. + *pagination = Some(spawn(async move { + if let Err(err) = + sdk_timeline.paginate_backwards(PaginationOptions::until_num_items(20, 10)).await + { + // TODO: would be nice to be able to set the status + // message remotely? + //self.set_status_message(format!( + //"Error during backpagination: {err}" + //)); + error!("Error during backpagination: {err}") + } + })); + } + + /// Returns the currently selected room id, if any. + fn get_selected_room_id(&self, selected: Option) -> Option { + let selected = selected.or_else(|| self.room_list_entries.state.selected())?; + + self.room_list_entries + .items + .lock() + .unwrap() + .get(selected) + .cloned() + .and_then(|entry| entry.as_room_id().map(ToOwned::to_owned)) + } + + fn subscribe_to_selected_room(&mut self, selected: usize) { + // Delete the subscription to the previous room, if any. + if let Some(room) = self.current_room_subscription.take() { + room.unsubscribe(); + } + + // Subscribe to the new room. + if let Some(room) = self + .get_selected_room_id(Some(selected)) + .and_then(|room_id| self.ui_rooms.lock().unwrap().get(&room_id).cloned()) + { + room.subscribe(None); + self.current_room_subscription = Some(room); + } + } + + async fn render_loop(&mut self, mut terminal: Terminal) -> anyhow::Result<()> { + loop { + terminal.draw(|f| f.render_widget(&mut *self, f.size()))?; + + if crossterm::event::poll(Duration::from_millis(16))? { + if let Event::Key(key) = event::read()? { + if key.kind == KeyEventKind::Press { + use KeyCode::*; + match key.code { + Char('q') | Esc => return Ok(()), + + Char('j') | Down => { + if let Some(i) = self.room_list_entries.next() { + self.subscribe_to_selected_room(i); + } + } + + Char('k') | Up => { + if let Some(i) = self.room_list_entries.previous() { + self.subscribe_to_selected_room(i); + } + } + + Char('s') => self.sync_service.start().await, + Char('S') => self.sync_service.stop().await?, + Char('r') => self.details_mode = DetailsMode::ReadReceipts, + Char('t') => self.details_mode = DetailsMode::TimelineItems, + + Char('b') if self.details_mode == DetailsMode::TimelineItems => { + self.back_paginate().await; + } + + Char('m') if self.details_mode == DetailsMode::ReadReceipts => { + self.mark_as_read().await + } + + _ => {} + } + } + } + } + } + } + + async fn run(&mut self, terminal: Terminal) -> anyhow::Result<()> { + self.render_loop(terminal).await?; + + // At this point the user has exited the loop, so shut down the application. 
+ restore_terminal()?; + + println!("Closing sync service..."); + + let s = self.sync_service.clone(); + let wait_for_termination = spawn(async move { + while let Some(state) = s.state().next().await { + if !matches!(state, sync_service::State::Running) { + break; + } + } + }); + + self.sync_service.stop().await?; + self.listen_task.abort(); + for timeline in self.timelines.lock().unwrap().values() { + timeline.task.abort(); + } + wait_for_termination.await.unwrap(); + + println!("okthxbye!"); + Ok(()) + } +} + +impl Widget for &mut App { + /// Render the whole app. + fn render(self, area: Rect, buf: &mut Buffer) { + // Create a space for header, todo list and the footer. + let vertical = + Layout::vertical([Constraint::Length(2), Constraint::Min(0), Constraint::Length(2)]); + let [header_area, rest_area, footer_area] = vertical.areas(area); + + // Create two chunks with equal horizontal screen space. One for the list and + // the other for the info block. + let horizontal = + Layout::horizontal([Constraint::Percentage(50), Constraint::Percentage(50)]); + let [lhs, rhs] = horizontal.areas(rest_area); + + self.render_title(header_area, buf); + self.render_left(lhs, buf); + self.render_right(rhs, buf); + self.render_footer(footer_area, buf); + } +} + +impl App { + /// Render the top square (title of the program). + fn render_title(&self, area: Rect, buf: &mut Buffer) { + Paragraph::new("Multiverse").bold().centered().render(area, buf); + } + + /// Renders the left part of the screen, that is, the list of rooms. + fn render_left(&mut self, area: Rect, buf: &mut Buffer) { + // We create two blocks, one is for the header (outer) and the other is for list + // (inner). + let outer_block = Block::default() + .borders(Borders::NONE) + .fg(TEXT_COLOR) + .bg(HEADER_BG) + .title("Room list") + .title_alignment(Alignment::Center); + let inner_block = + Block::default().borders(Borders::NONE).fg(TEXT_COLOR).bg(NORMAL_ROW_COLOR); + + // We get the inner area from outer_block. We'll use this area later to render + // the table. + let outer_area = area; + let inner_area = outer_block.inner(outer_area); + + // We can render the header in outer_area. + outer_block.render(outer_area, buf); + + // Iterate through all elements in the `items` and stylize them. + let items: Vec> = self + .room_list_entries + .items + .lock() + .unwrap() + .iter() + .enumerate() + .map(|(i, item)| { + let bg_color = match i % 2 { + 0 => NORMAL_ROW_COLOR, + _ => ALT_ROW_COLOR, + }; + + let line = if let Some(room) = + item.as_room_id().and_then(|room_id| self.client.get_room(room_id)) + { + format!("#{i} {}", room.room_id()) + } else { + "non-filled room".to_owned() + }; + + let line = Line::styled(line, TEXT_COLOR); + ListItem::new(line).bg(bg_color) + }) + .collect(); + + // Create a List from all list items and highlight the currently selected one. + let items = List::new(items) + .block(inner_block) + .highlight_style( + Style::default() + .add_modifier(Modifier::BOLD) + .add_modifier(Modifier::REVERSED) + .fg(SELECTED_STYLE_FG), + ) + .highlight_symbol(">") + .highlight_spacing(HighlightSpacing::Always); + + StatefulWidget::render(items, inner_area, buf, &mut self.room_list_entries.state); + } + + /// Render the right part of the screen, showing the details of the current + /// view. + fn render_right(&mut self, area: Rect, buf: &mut Buffer) { + // Split the block into two parts: + // - outer_block with the title of the block. + // - inner_block that will contain the actual details. 
+ let outer_block = Block::default() + .borders(Borders::NONE) + .fg(TEXT_COLOR) + .bg(HEADER_BG) + .title("Room view") + .title_alignment(Alignment::Center); + let inner_block = Block::default() + .borders(Borders::NONE) + .bg(NORMAL_ROW_COLOR) + .padding(Padding::horizontal(1)); + + // This is a similar process to what we did for list. outer_info_area will be + // used for header inner_info_area will be used for the list info. + let outer_area = area; + let inner_area = outer_block.inner(outer_area); + + // We can render the header. Inner area will be rendered later. + outer_block.render(outer_area, buf); + + // Helper to render some string as a paragraph. + let render_paragraph = |buf: &mut Buffer, content: String| { + Paragraph::new(content) + .block(inner_block.clone()) + .fg(TEXT_COLOR) + .wrap(Wrap { trim: false }) + .render(inner_area, buf); + }; + + if let Some(room_id) = self.get_selected_room_id(None) { + match self.details_mode { + DetailsMode::ReadReceipts => { + // In read receipts mode, show the read receipts object as computed + // by the client. + match self.ui_rooms.lock().unwrap().get(&room_id).cloned() { + Some(room) => { + let receipts = room.read_receipts(); + render_paragraph( + buf, + format!( + r#"Read receipts: +- unread: {} +- notifications: {} +- mentions: {} + +--- + +{:?} +"#, + receipts.num_unread, + receipts.num_notifications, + receipts.num_mentions, + receipts + ), + ) + } + None => render_paragraph( + buf, + "(room disappeared in the room list service)".to_owned(), + ), + } + } + + DetailsMode::TimelineItems => { + if !self.render_timeline(&room_id, inner_block.clone(), inner_area, buf) { + render_paragraph(buf, "(room's timeline disappeared)".to_owned()) + } + } + } + } else { + render_paragraph(buf, "Nothing to see here...".to_owned()) + }; + } + + /// Renders the list of timeline items for the given room. + fn render_timeline( + &mut self, + room_id: &RoomId, + inner_block: Block<'_>, + inner_area: Rect, + buf: &mut Buffer, + ) -> bool { + let Some(items) = + self.timelines.lock().unwrap().get(room_id).map(|timeline| timeline.items.clone()) + else { + return false; + }; + + let items = items.lock().unwrap(); + let mut content = Vec::new(); + + for item in items.iter() { + match item.kind() { + TimelineItemKind::Event(ev) => { + let sender = ev.sender(); + + match ev.content() { + TimelineItemContent::Message(message) => { + if let MessageType::Text(text) = message.msgtype() { + content.push(format!("{}: {}", sender, text.body)) + } + } + + TimelineItemContent::RedactedMessage => { + content.push(format!("{}: -- redacted --", sender)) + } + TimelineItemContent::UnableToDecrypt(_) => { + content.push(format!("{}: (UTD)", sender)) + } + TimelineItemContent::Sticker(_) + | TimelineItemContent::MembershipChange(_) + | TimelineItemContent::ProfileChange(_) + | TimelineItemContent::OtherState(_) + | TimelineItemContent::FailedToParseMessageLike { .. } + | TimelineItemContent::FailedToParseState { .. 
} + | TimelineItemContent::Poll(_) + | TimelineItemContent::CallInvite => { + continue; + } + } + } + + TimelineItemKind::Virtual(virt) => match virt { + VirtualTimelineItem::DayDivider(unix_ts) => { + content.push(format!("Date: {unix_ts:?}")); + } + VirtualTimelineItem::ReadMarker => { + content.push("Read marker".to_owned()); + } + }, + } + } + + let list_items = content + .into_iter() + .enumerate() + .map(|(i, line)| { + let bg_color = match i % 2 { + 0 => NORMAL_ROW_COLOR, + _ => ALT_ROW_COLOR, + }; + let line = Line::styled(line, TEXT_COLOR); + ListItem::new(line).bg(bg_color) + }) + .collect::>(); + + let list = List::new(list_items) + .block(inner_block) + .highlight_style( + Style::default() + .add_modifier(Modifier::BOLD) + .add_modifier(Modifier::REVERSED) + .fg(SELECTED_STYLE_FG), + ) + .highlight_symbol(">") + .highlight_spacing(HighlightSpacing::Always); + + let mut dummy_list_state = ListState::default(); + StatefulWidget::render(list, inner_area, buf, &mut dummy_list_state); + true + } + + /// Render the bottom part of the screen, with a status message if one is + /// set, or a default help message otherwise. + fn render_footer(&self, area: Rect, buf: &mut Buffer) { + let content = if let Some(status_message) = self.last_status_message.lock().unwrap().clone() + { + status_message + } else { + match self.details_mode { + DetailsMode::ReadReceipts => { + "\nUse ↓↑ to move, s/S to start/stop the sync service, m to mark as read, t to show the timeline.".to_owned() + } + DetailsMode::TimelineItems => { + "\nUse ↓↑ to move, s/S to start/stop the sync service, r to show read receipts.".to_owned() + } + } + }; + Paragraph::new(content).centered().render(area, buf); + } +} + +impl StatefulList { + /// Focus the list on the next item, wraps around if needs be. + /// + /// Returns the index only if there was a meaningful change. + fn next(&mut self) -> Option { + let num_items = self.items.lock().unwrap().len(); + + // If there's no item to select, leave early. + if num_items == 0 { + self.state.select(None); + return None; + } + + // Otherwise, select the next one or wrap around. + let prev = self.state.selected(); + let new = prev.map_or(0, |i| if i >= num_items - 1 { 0 } else { i + 1 }); + + if prev != Some(new) { + self.state.select(Some(new)); + Some(new) + } else { + None + } + } + + /// Focus the list on the previous item, wraps around if needs be. + /// + /// Returns the index only if there was a meaningful change. + fn previous(&mut self) -> Option { + let num_items = self.items.lock().unwrap().len(); + + // If there's no item to select, leave early. + if num_items == 0 { + self.state.select(None); + return None; + } + + // Otherwise, select the previous one or wrap around. + let prev = self.state.selected(); + let new = prev.map_or(0, |i| if i == 0 { num_items - 1 } else { i - 1 }); + + if prev != Some(new) { + self.state.select(Some(new)); + Some(new) + } else { + None + } + } +} + +/// Configure the client so it's ready for sync'ing. +/// +/// Will log in or reuse a previous session. 
+async fn configure_client(server_name: String, config_path: String) -> anyhow::Result<Client> {
+    let server_name = ServerName::parse(&server_name)?;
+
+    let config_path = PathBuf::from(config_path);
+    let client = Client::builder()
+        .store_config(
+            StoreConfig::default()
+                .crypto_store(
+                    SqliteCryptoStore::open(config_path.join("crypto.sqlite"), None).await?,
+                )
+                .state_store(SqliteStateStore::open(config_path.join("state.sqlite"), None).await?),
+        )
+        .server_name(&server_name)
+        .with_encryption_settings(EncryptionSettings {
+            auto_enable_cross_signing: true,
+            backup_download_strategy: BackupDownloadStrategy::AfterDecryptionFailure,
+            auto_enable_backups: true,
+        })
+        .build()
+        .await?;
+
+    // Try reading a session, otherwise create a new one.
+    let session_path = config_path.join("session.json");
+    if let Ok(serialized) = std::fs::read_to_string(&session_path) {
+        let session: MatrixSession = serde_json::from_str(&serialized)?;
+        client.restore_session(session).await?;
+        println!("restored session");
+    } else {
+        login_with_password(&client).await?;
+        println!("new login");
+
+        // Immediately save the session to disk.
+        if let Some(session) = client.session() {
+            let AuthSession::Matrix(session) = session else { panic!("unexpected oidc session") };
+            let serialized = serde_json::to_string(&session)?;
+            std::fs::write(session_path, serialized)?;
+            println!("saved session");
+        }
+    }
+
+    Ok(client)
+}
+
+/// Asks the user for a username and password, and tries to log in using the
+/// matrix auth with those.
+async fn login_with_password(client: &Client) -> anyhow::Result<()> {
+    println!("Logging in with username and password…");
+
+    loop {
+        print!("\nUsername: ");
+        stdout().flush().expect("Unable to write to stdout");
+        let mut username = String::new();
+        io::stdin().read_line(&mut username).expect("Unable to read user input");
+        username = username.trim().to_owned();
+
+        let password = rpassword::prompt_password("Password.")?;
+
+        match client.matrix_auth().login_username(&username, password.trim()).await {
+            Ok(_) => {
+                println!("Logged in as {username}");
+                break;
+            }
+            Err(error) => {
+                println!("Error logging in: {error}");
+                println!("Please try again\n");
+            }
+        }
+    }
+
+    Ok(())
+}
diff --git a/labs/rrrepl/src/main.rs b/labs/rrrepl/src/main.rs
deleted file mode 100644
index f1d694c4ee4..00000000000
--- a/labs/rrrepl/src/main.rs
+++ /dev/null
@@ -1,212 +0,0 @@
-use std::{
-    env,
-    io::{self, Write},
-    process::exit,
-    sync::{Arc, Mutex},
-};
-
-use futures_util::{pin_mut, StreamExt as _};
-use matrix_sdk::{
-    config::StoreConfig, matrix_auth::MatrixSession,
-    ruma::api::client::receipt::create_receipt::v3::ReceiptType, AuthSession, Client, ServerName,
-    SqliteCryptoStore, SqliteStateStore,
-};
-use matrix_sdk_ui::sync_service::{self, SyncService};
-use tokio::spawn;
-use tracing_subscriber::{layer::SubscriberExt as _, util::SubscriberInitExt as _, EnvFilter};
-
-#[tokio::main]
-async fn main() -> anyhow::Result<()> {
-    let file_layer = tracing_subscriber::fmt::layer()
-        .with_ansi(false)
-        .with_writer(tracing_appender::rolling::hourly("/tmp/", "logs-"));
-
-    tracing_subscriber::registry()
-        .with(EnvFilter::new(std::env::var("RUST_LOG").unwrap_or("".into())))
-        .with(file_layer)
-        .init();
-
-    let Some(server_name) = env::args().nth(1) else {
-        eprintln!("Usage: {} ", env::args().next().unwrap());
-        exit(1)
-    };
-
-    login_and_sync(server_name).await?;
-
-    Ok(())
-}
-
-/// Log in to the given homeserver and sync.
-async fn login_and_sync(server_name: String) -> anyhow::Result<()> { - let server_name = ServerName::parse(&server_name)?; - - let client = Client::builder() - .store_config( - StoreConfig::default() - .crypto_store(SqliteCryptoStore::open("/tmp/crypto.sqlite", None).await?) - .state_store(SqliteStateStore::open("/tmp/state.sqlite", None).await?), - ) - .server_name(&server_name) - .build() - .await?; - - // Try reading from /tmp/session.json - if let Ok(serialized) = std::fs::read_to_string("/tmp/session.json") { - let session: MatrixSession = serde_json::from_str(&serialized)?; - client.restore_session(session).await?; - println!("restored session"); - } else { - login_with_password(&client).await?; - println!("new login"); - } - - let sync_service = SyncService::builder(client.clone()).build().await?; - - let room_list_service = sync_service.room_list_service(); - - let all_rooms = room_list_service.all_rooms().await?; - let (rooms, stream) = all_rooms.entries(); - - let rooms = Arc::new(Mutex::new(rooms.clone())); - - // This will sync (with encryption) until an error happens or the program is - // killed. - sync_service.start().await; - - let c = client.clone(); - let r = rooms.clone(); - let handle = spawn(async move { - pin_mut!(stream); - let rooms = r; - let client = c; - - while let Some(diffs) = stream.next().await { - let mut rooms = rooms.lock().unwrap(); - for diff in diffs { - diff.apply(&mut rooms); - } - println!("New update!"); - for (id, room) in rooms.iter().enumerate() { - if let Some(room) = room.as_room_id().and_then(|room_id| client.get_room(room_id)) { - println!("> #{id} {}: {:?}", room.room_id(), room.read_receipts()); - } - } - } - }); - - loop { - let mut command = String::new(); - - print!("$ "); - let _ = io::stdout().flush(); - io::stdin().read_line(&mut command).expect("Unable to read user input"); - - match command.trim() { - "rooms" => { - let rooms = rooms.lock().unwrap(); - for (id, room) in rooms.iter().enumerate() { - if let Some(room) = - room.as_room_id().and_then(|room_id| client.get_room(room_id)) - { - println!("> #{id} {}: {:?}", room.room_id(), room.read_receipts()); - } - } - } - - "start" => { - sync_service.start().await; - println!("> sync service started!"); - } - - "stop" => { - sync_service.stop().await?; - println!("> sync service stopped!"); - } - - "" | "exit" => { - break; - } - - _ => { - if let Some((_, id)) = command.split_once("send ") { - let id = id.trim().parse::()?; - let room_id = { rooms.lock().unwrap()[id].as_room_id().map(ToOwned::to_owned) }; - if let Some(room_id) = &room_id { - let room = room_list_service.room(room_id).await?; - - if !room.is_timeline_initialized() { - room.init_timeline_with_builder( - room.default_room_timeline_builder().await, - ) - .await?; - } - let timeline = room.timeline().unwrap(); - - let did = timeline.mark_as_read(ReceiptType::Read).await?; - println!("> did {}send a read receipt!", if did { "" } else { "not " }); - } - } else { - println!("unknown command"); - } - } - } - } - - println!("Closing sync service..."); - - let sync_service = Arc::new(sync_service); - let s = sync_service.clone(); - let wait_for_termination = spawn(async move { - while let Some(state) = s.state().next().await { - if !matches!(state, sync_service::State::Running) { - break; - } - } - }); - - sync_service.stop().await?; - handle.abort(); - wait_for_termination.await.unwrap(); - - if let Some(session) = client.session() { - let AuthSession::Matrix(session) = session else { panic!("unexpected oidc session") }; - 
let serialized = serde_json::to_string(&session)?; - std::fs::write("/tmp/session.json", serialized)?; - println!("saved session"); - } - - println!("okthxbye!"); - - Ok(()) -} - -async fn login_with_password(client: &Client) -> anyhow::Result<()> { - println!("Logging in with username and password…"); - - loop { - print!("\nUsername: "); - io::stdout().flush().expect("Unable to write to stdout"); - let mut username = String::new(); - io::stdin().read_line(&mut username).expect("Unable to read user input"); - username = username.trim().to_owned(); - - print!("Password: "); - io::stdout().flush().expect("Unable to write to stdout"); - let mut password = String::new(); - io::stdin().read_line(&mut password).expect("Unable to read user input"); - password = password.trim().to_owned(); - - match client.matrix_auth().login_username(&username, &password).await { - Ok(_) => { - println!("Logged in as {username}"); - break; - } - Err(error) => { - println!("Error logging in: {error}"); - println!("Please try again\n"); - } - } - } - - Ok(()) -} diff --git a/testing/matrix-sdk-integration-testing/src/tests/sliding_sync/room.rs b/testing/matrix-sdk-integration-testing/src/tests/sliding_sync/room.rs index f65b173b9ff..83a9247addf 100644 --- a/testing/matrix-sdk-integration-testing/src/tests/sliding_sync/room.rs +++ b/testing/matrix-sdk-integration-testing/src/tests/sliding_sync/room.rs @@ -412,39 +412,45 @@ async fn test_room_notification_count() -> Result<()> { alice_room.enable_encryption().await?; - let mut info_updates = alice_room.subscribe_info(); + let mut room_info_updates = alice_room.subscribe_info(); // At first, nothing has happened, so we shouldn't have any notifications. assert_eq!(alice_room.num_unread_messages(), 0); assert_eq!(alice_room.num_unread_mentions(), 0); assert_eq!(alice_room.num_unread_notifications(), 0); - assert_pending!(info_updates); + assert_pending!(room_info_updates); // Bob joins, nothing happens. bob.join_room_by_id(&room_id).await?; - assert!(info_updates.next().await.is_some()); + assert!(timeout(Duration::from_secs(3), room_info_updates.next()) + .await + .expect("timeout getting room info update #1") + .is_some()); assert_eq!(alice_room.num_unread_messages(), 0); assert_eq!(alice_room.num_unread_mentions(), 0); assert_eq!(alice_room.num_unread_notifications(), 0); assert!(alice_room.latest_event().is_none()); - assert_pending!(info_updates); + assert_pending!(room_info_updates); // Bob sends a non-mention message. let bob_room = bob.get_room(&room_id).expect("bob knows about alice's room"); bob_room.send(RoomMessageEventContent::text_plain("hello world")).await?; - assert!(info_updates.next().await.is_some()); + assert!(timeout(Duration::from_secs(3), room_info_updates.next()) + .await + .expect("timeout getting room info update #2") + .is_some()); assert_eq!(alice_room.num_unread_messages(), 1); assert_eq!(alice_room.num_unread_notifications(), 1); assert_eq!(alice_room.num_unread_mentions(), 0); - assert_pending!(info_updates); + assert_pending!(room_info_updates); // Bob sends a mention message. bob_room @@ -454,53 +460,67 @@ async fn test_room_notification_count() -> Result<()> { ) .await?; - assert!(info_updates.next().await.is_some()); + assert!(timeout(Duration::from_secs(3), room_info_updates.next()) + .await + .expect("timeout getting room info update #3") + .is_some()); // The highlight also counts as a notification. 
assert_eq!(alice_room.num_unread_messages(), 2); assert_eq!(alice_room.num_unread_notifications(), 2); assert_eq!(alice_room.num_unread_mentions(), 1); - assert_pending!(info_updates); + assert_pending!(room_info_updates); // Alice marks the room as read. let event_id = latest_event.lock().await.take().unwrap().event_id().to_owned(); alice_room.send_single_receipt(ReceiptType::Read, ReceiptThread::Unthreaded, event_id).await?; // Remote echo of marking the room as read. - assert!(info_updates.next().await.is_some()); + assert!(timeout(Duration::from_secs(3), room_info_updates.next()) + .await + .expect("timeout getting room info update #4") + .is_some()); // Sometimes, we get a spurious update quickly. - let _ = timeout(Duration::from_secs(2), info_updates.next()).await; + let _ = timeout(Duration::from_secs(2), room_info_updates.next()).await; assert_eq!(alice_room.num_unread_messages(), 0); assert_eq!(alice_room.num_unread_notifications(), 0); assert_eq!(alice_room.num_unread_mentions(), 0); - assert_pending!(info_updates); + assert_pending!(room_info_updates); // Alice sends a message. alice_room.send(RoomMessageEventContent::text_plain("hello bob")).await?; // Local echo for our own message. - assert!(info_updates.next().await.is_some()); + assert!(timeout(Duration::from_secs(3), room_info_updates.next()) + .await + .expect("timeout getting room info update #5") + .is_some()); assert_eq!(alice_room.num_unread_messages(), 0); assert_eq!(alice_room.num_unread_notifications(), 0); assert_eq!(alice_room.num_unread_mentions(), 0); // Remote echo for our own message. - assert!(info_updates.next().await.is_some()); + assert!(timeout(Duration::from_secs(3), room_info_updates.next()) + .await + .expect("timeout getting room info update #6") + .is_some()); assert_eq!(alice_room.num_unread_messages(), 0); assert_eq!(alice_room.num_unread_notifications(), 0); assert_eq!(alice_room.num_unread_mentions(), 0); - assert_pending!(info_updates); + assert_pending!(room_info_updates); // Now Alice is only interesting in mentions of their name. let settings = alice.notification_settings().await; + let mut settings_changes = settings.subscribe_to_changes(); + tracing::warn!("Updating room notification mode to mentions and keywords only..."); settings .set_room_notification_mode( @@ -511,11 +531,17 @@ async fn test_room_notification_count() -> Result<()> { tracing::warn!("Done!"); // Wait for remote echo. - let _ = settings.subscribe_to_changes().recv().await; + timeout(Duration::from_secs(3), settings_changes.recv()) + .await + .expect("timeout when waiting for settings update") + .expect("should've received echo after updating settings"); bob_room.send(RoomMessageEventContent::text_plain("I said hello!")).await?; - assert!(info_updates.next().await.is_some()); + assert!(timeout(Duration::from_secs(3), room_info_updates.next()) + .await + .expect("timeout getting room info update #7") + .is_some()); // The message doesn't contain a mention, so it doesn't notify Alice. But it // exists. @@ -523,7 +549,7 @@ async fn test_room_notification_count() -> Result<()> { assert_eq!(alice_room.num_unread_notifications(), 0); assert_eq!(alice_room.num_unread_mentions(), 0); - assert_pending!(info_updates); + assert_pending!(room_info_updates); // Bob sends a mention message. 
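The change threaded through this test swaps bare `room_info_updates.next().await` calls for a bounded wait, so a missed room-info update fails the test after three seconds instead of hanging the whole integration run. The pattern in isolation looks like the sketch below, where a `futures_channel` stream is an assumed stand-in for the `subscribe_info()` subscription:

```rust
use std::time::Duration;

use futures_util::StreamExt as _;
use tokio::time::timeout;

#[tokio::main]
async fn main() {
    // Stand-in for `alice_room.subscribe_info()`: any `Stream` of updates works.
    let (tx, mut updates) = futures_channel::mpsc::unbounded::<u32>();
    tx.unbounded_send(1).unwrap();

    // Wait for the next update, but give up after 3 seconds instead of hanging.
    let update = timeout(Duration::from_secs(3), updates.next())
        .await
        .expect("timed out waiting for the next room info update");
    assert!(update.is_some());
}
```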
bob_room @@ -533,14 +559,17 @@ async fn test_room_notification_count() -> Result<()> { ) .await?; - assert!(info_updates.next().await.is_some()); + assert!(timeout(Duration::from_secs(3), room_info_updates.next()) + .await + .expect("timeout getting room info update #8") + .is_some()); // The highlight also counts as a notification. assert_eq!(alice_room.num_unread_messages(), 2); assert_eq!(alice_room.num_unread_notifications(), 1); assert_eq!(alice_room.num_unread_mentions(), 1); - assert_pending!(info_updates); + assert_pending!(room_info_updates); Ok(()) } diff --git a/testing/matrix-sdk-test-macros/src/lib.rs b/testing/matrix-sdk-test-macros/src/lib.rs index 5885a2a9bda..f6dca982d06 100644 --- a/testing/matrix-sdk-test-macros/src/lib.rs +++ b/testing/matrix-sdk-test-macros/src/lib.rs @@ -1,6 +1,6 @@ use proc_macro::TokenStream; use quote::{format_ident, quote, ToTokens}; -use syn::{self, parse_macro_input}; +use syn::parse_macro_input; /// Attribute to use `wasm_bindgen_test` for wasm32 targets and `tokio::test` /// for everything else with async-support and custom result-types diff --git a/testing/matrix-sdk-test/src/sync_builder/mod.rs b/testing/matrix-sdk-test/src/sync_builder/mod.rs index 4df10c38c29..ea6d05bd8cb 100644 --- a/testing/matrix-sdk-test/src/sync_builder/mod.rs +++ b/testing/matrix-sdk-test/src/sync_builder/mod.rs @@ -10,7 +10,7 @@ use ruma::{ }, events::{presence::PresenceEvent, AnyGlobalAccountDataEvent}, serde::Raw, - OwnedRoomId, + OwnedRoomId, OwnedUserId, UserId, }; use serde_json::{from_value as from_json_value, json, Value as JsonValue}; @@ -52,6 +52,8 @@ pub struct SyncResponseBuilder { /// Internal counter to enable the `prev_batch` and `next_batch` of each /// sync response to vary. batch_counter: i64, + /// The device lists of the user. + changed_device_lists: Vec, } impl SyncResponseBuilder { @@ -136,6 +138,11 @@ impl SyncResponseBuilder { self } + pub fn add_change_device(&mut self, user_id: &UserId) -> &mut Self { + self.changed_device_lists.push(user_id.to_owned()); + self + } + /// Builds a sync response as a JSON Value containing the events we queued /// so far. 
     /// Builds a sync response as a JSON Value containing the events we queued
     /// so far.
     ///
@@ -155,7 +162,7 @@ impl SyncResponseBuilder {
             "device_one_time_keys_count": {},
             "next_batch": next_batch,
             "device_lists": {
-                "changed": [],
+                "changed": self.changed_device_lists,
                 "left": [],
             },
             "rooms": {
diff --git a/testing/matrix-sdk-test/src/test_json/api_responses.rs b/testing/matrix-sdk-test/src/test_json/api_responses.rs
index d534697717a..8d052f209a7 100644
--- a/testing/matrix-sdk-test/src/test_json/api_responses.rs
+++ b/testing/matrix-sdk-test/src/test_json/api_responses.rs
@@ -324,7 +324,8 @@ pub static VERSIONS: Lazy<JsonValue> = Lazy::new(|| {
         ],
         "unstable_features": {
             "org.matrix.label_based_filtering":true,
-            "org.matrix.e2e_cross_signing":true
+            "org.matrix.e2e_cross_signing":true,
+            "org.matrix.msc4028":true
         }
     })
 });
diff --git a/testing/matrix-sdk-test/src/test_json/sync.rs b/testing/matrix-sdk-test/src/test_json/sync.rs
index 6a1e40bf8f9..02a4ea12a83 100644
--- a/testing/matrix-sdk-test/src/test_json/sync.rs
+++ b/testing/matrix-sdk-test/src/test_json/sync.rs
@@ -1525,3 +1525,333 @@ pub static VOIP_SYNC: Lazy<JsonValue> = Lazy::new(|| {
         }
     })
 });
+
+pub static SYNC_ADMIN_AND_MOD: Lazy<JsonValue> = Lazy::new(|| {
+    json!({
+        "device_one_time_keys_count": {},
+        "next_batch": "s526_47314_0_7_1_1_1_11444_1",
+        "device_lists": {
+            "changed": [
+                "@admin:example.org"
+            ],
+            "left": []
+        },
+        "rooms": {
+            "invite": {},
+            "join": {
+                *DEFAULT_TEST_ROOM_ID: {
+                    "summary": {
+                        "m.heroes": [
+                            "@example2:localhost"
+                        ],
+                        "m.joined_member_count": 2,
+                        "m.invited_member_count": 0
+                    },
+                    "account_data": {
+                        "events": []
+                    },
+                    "ephemeral": {
+                        "events": []
+                    },
+                    "state": {
+                        "events": [
+                            {
+                                "content": {
+                                    "join_rule": "public"
+                                },
+                                "event_id": "$15139375514WsgmR:localhost",
+                                "origin_server_ts": 151393755000000_u64,
+                                "sender": "@admin:localhost",
+                                "state_key": "",
+                                "type": "m.room.join_rules",
+                                "unsigned": {
+                                    "age": 7034220
+                                }
+                            },
+                            {
+                                "content": {
+                                    "avatar_url": null,
+                                    "displayname": "admin",
+                                    "membership": "join"
+                                },
+                                "event_id": "$151800140517rfvjc:localhost",
+                                "membership": "join",
+                                "origin_server_ts": 151800140000000_u64,
+                                "sender": "@admin:localhost",
+                                "state_key": "@admin:localhost",
+                                "type": "m.room.member",
+                                "unsigned": {
+                                    "age": 297036,
+                                    "replaces_state": "$151800111315tsynI:localhost"
+                                }
+                            },
+                            {
+                                "content": {
+                                    "avatar_url": null,
+                                    "displayname": "mod",
+                                    "membership": "join"
+                                },
+                                "event_id": "$151800140518rfvjc:localhost",
+                                "membership": "join",
+                                "origin_server_ts": 1518001450000000_u64,
+                                "sender": "@mod:localhost",
+                                "state_key": "@mod:localhost",
+                                "type": "m.room.member",
+                                "unsigned": {
+                                    "age": 297035,
+                                }
+                            },
+                            {
+                                "content": {
+                                    "history_visibility": "shared"
+                                },
+                                "event_id": "$15139375515VaJEY:localhost",
+                                "origin_server_ts": 151393755000000_u64,
+                                "sender": "@admin:localhost",
+                                "state_key": "",
+                                "type": "m.room.history_visibility",
+                                "unsigned": {
+                                    "age": 703422
+                                }
+                            },
+                            {
+                                "content": {
+                                    "creator": "@example:localhost"
+                                },
+                                "event_id": "$15139375510KUZHi:localhost",
+                                "origin_server_ts": 151393755000000_u64,
+                                "sender": "@admin:localhost",
+                                "state_key": "",
+                                "type": "m.room.create",
+                                "unsigned": {
+                                    "age": 703422
+                                }
+                            },
+                            {
+                                "content": {
+                                    "topic": "room topic"
+                                },
+                                "event_id": "$151957878228ssqrJ:localhost",
+                                "origin_server_ts": 151957878000000_u64,
+                                "sender": "@admin:localhost",
+                                "state_key": "",
+                                "type": "m.room.topic",
+                                "unsigned": {
+                                    "age": 1392989709,
+                                    "prev_content": {
+                                        "topic": "test"
+                                    },
+                                    "prev_sender": "@example:localhost",
+                                    "replaces_state": "$151957069225EVYKm:localhost"
+                                }
+                            },
+                            {
+                                "content": {
+                                    "ban": 50,
+ "events": { + "m.room.avatar": 50, + "m.room.canonical_alias": 50, + "m.room.history_visibility": 100, + "m.room.name": 50, + "m.room.power_levels": 100 + }, + "events_default": 0, + "invite": 0, + "kick": 50, + "redact": 50, + "state_default": 50, + "users": { + "@admin:localhost": 100, + "@mod:localhost": 50 + }, + "users_default": 0 + }, + "event_id": "$15139375512JaHAW:localhost", + "origin_server_ts": 151393755000000_u64, + "sender": "@admin:localhost", + "state_key": "", + "type": "m.room.power_levels", + "unsigned": { + "age": 703422 + } + } + ] + }, + "timeline": { + "events": [ + { + "content": { + "body": "baba", + "format": "org.matrix.custom.html", + "formatted_body": "baba", + "msgtype": "m.text" + }, + "event_id": "$152037280074GZeOm:localhost", + "origin_server_ts": 152037280000000_u64, + "sender": "@admin:localhost", + "type": "m.room.message", + "unsigned": { + "age": 598971425 + } + } + ], + "limited": true, + "prev_batch": "t392-516_47314_0_7_1_1_1_11444_1" + }, + "unread_notifications": { + "highlight_count": 0, + "notification_count": 11 + } + } + }, + "leave": {} + }, + "to_device": { + "events": [] + }, + "presence": { + "events": [] + } + }) +}); + +pub static CUSTOM_ROOM_POWER_LEVELS: Lazy = Lazy::new(|| { + json!({ + "device_one_time_keys_count": {}, + "next_batch": "s526_47314_0_7_1_1_1_11444_1", + "device_lists": { + "changed": [ + "@admin:example.org" + ], + "left": [] + }, + "rooms": { + "invite": {}, + "join": { + *DEFAULT_TEST_ROOM_ID: { + "summary": { + "m.heroes": [ + "@example2:localhost" + ], + "m.joined_member_count": 1, + "m.invited_member_count": 0 + }, + "account_data": { + "events": [] + }, + "ephemeral": { + "events": [] + }, + "state": { + "events": [ + { + "content": { + "join_rule": "public" + }, + "event_id": "$15139375514WsgmR:localhost", + "origin_server_ts": 151393755000000_u64, + "sender": "@admin:localhost", + "state_key": "", + "type": "m.room.join_rules", + "unsigned": { + "age": 7034220 + } + }, + { + "content": { + "avatar_url": null, + "displayname": "admin", + "membership": "join" + }, + "event_id": "$151800140517rfvjc:localhost", + "membership": "join", + "origin_server_ts": 151800140000000_u64, + "sender": "@admin:localhost", + "state_key": "@admin:localhost", + "type": "m.room.member", + "unsigned": { + "age": 297036, + "replaces_state": "$151800111315tsynI:localhost" + } + }, + { + "content": { + "creator": "@example:localhost" + }, + "event_id": "$15139375510KUZHi:localhost", + "origin_server_ts": 151393755000000_u64, + "sender": "@admin:localhost", + "state_key": "", + "type": "m.room.create", + "unsigned": { + "age": 703422 + } + }, + { + "content": { + "ban": 100, + "events": { + "m.room.avatar": 100, + "m.room.canonical_alias": 50, + "m.room.history_visibility": 100, + "m.room.name": 50, + "m.room.power_levels": 100 + }, + "events_default": 0, + "invite": 0, + "kick": 50, + "redact": 50, + "state_default": 50, + "users": { + "@admin:localhost": 100 + }, + "users_default": 0 + }, + "event_id": "$15139375512JaHAW:localhost", + "origin_server_ts": 151393755000000_u64, + "sender": "@admin:localhost", + "state_key": "", + "type": "m.room.power_levels", + "unsigned": { + "age": 703422 + } + } + ] + }, + "timeline": { + "events": [ + { + "content": { + "body": "baba", + "format": "org.matrix.custom.html", + "formatted_body": "baba", + "msgtype": "m.text" + }, + "event_id": "$152037280074GZeOm:localhost", + "origin_server_ts": 152037280000000_u64, + "sender": "@admin:localhost", + "type": "m.room.message", + "unsigned": { + "age": 
598971425 + } + } + ], + "limited": true, + "prev_batch": "t392-516_47314_0_7_1_1_1_11444_1" + }, + "unread_notifications": { + "highlight_count": 0, + "notification_count": 11 + } + } + }, + "leave": {} + }, + "to_device": { + "events": [] + }, + "presence": { + "events": [] + } + }) +});
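The two fixtures added above differ mainly in their `m.room.power_levels` content: `SYNC_ADMIN_AND_MOD` grants `@admin:localhost` level 100 and `@mod:localhost` level 50, while `CUSTOM_ROOM_POWER_LEVELS` keeps a single admin with stricter settings (for example `ban` and `m.room.avatar` at 100). A sanity check along the lines below could pin that down; it is only a sketch, and the import path and test name are assumptions rather than part of the patch.

// Sketch only: verify that SYNC_ADMIN_AND_MOD really grants the moderator
// power level 50. The import path is assumed to match where the fixture lives.
use matrix_sdk_test::{test_json::sync::SYNC_ADMIN_AND_MOD, DEFAULT_TEST_ROOM_ID};

#[test]
fn admin_and_mod_fixture_has_expected_power_levels() {
    let sync = &*SYNC_ADMIN_AND_MOD;
    let room = &sync["rooms"]["join"][DEFAULT_TEST_ROOM_ID.as_str()];

    // Find the power-levels state event inside the fixture.
    let power_levels = room["state"]["events"]
        .as_array()
        .unwrap()
        .iter()
        .find(|event| event["type"] == "m.room.power_levels")
        .unwrap();

    assert_eq!(power_levels["content"]["users"]["@mod:localhost"], 50);
}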