diff --git a/.cursor/commands/check-pr.md b/.cursor/commands/check-pr.md
new file mode 100644
index 000000000..bccd2a6be
--- /dev/null
+++ b/.cursor/commands/check-pr.md
@@ -0,0 +1,2 @@
+check the last commits made by the following author for the PR I am reviewing and explain them extensively to me.
+If there are any critical logic errors in the commits, explain them to me and suggest fixes.
\ No newline at end of file
diff --git a/.cursor/commands/copilot-suggestion.md b/.cursor/commands/copilot-suggestion.md
new file mode 100644
index 000000000..6ec513ffb
--- /dev/null
+++ b/.cursor/commands/copilot-suggestion.md
@@ -0,0 +1 @@
+Check whether the following Copilot suggestion is valid. Implement it if it is valid; if it is not valid, explain why and skip it.
\ No newline at end of file
diff --git a/.cursor/commands/issue-new.md b/.cursor/commands/issue-new.md
new file mode 100644
index 000000000..b07bb52f9
--- /dev/null
+++ b/.cursor/commands/issue-new.md
@@ -0,0 +1,21 @@
+Create a GitHub "issue" description for the issue given by the user.
+keep it: simple, maintainer friendly (keep it as simple as possible without using technical jargon), clear, concise and avoid duplicate information. Write it as if a User found the issue.
+
+Title Format: [Type]: [Title]
+
+Types:
+- Feature
+- Bug
+- Improvement
+- Doc
+- Question
+
+Title Example: [Feature]: Support for Waybar on Bottom and on Sides
+
+## Body Format
+
+### Short Summary
+Describe the issue in a few sentences. (Max 2 sentences)
+
+### Body
+Only add this section if additional details are necessary beyond the short summary. Describe the issue in detail. List style if possible.
\ No newline at end of file
diff --git a/.cursor/commands/pr-update.md b/.cursor/commands/pr-update.md
index b9d957d49..9a112b9d5 100644
--- a/.cursor/commands/pr-update.md
+++ b/.cursor/commands/pr-update.md
@@ -1,3 +1,3 @@
# pr-update
-check commits and recent code changes to main branch, update @Pacsea/dev/PR/ and add not included updates. keep additions short, concise and clear
+check commits and recent code changes (always compare to the main branch), update @Pacsea/dev/PR/ and add updates that are not yet included. Keep additions short, concise and clear. Do not remove any updates that are already in the PR file.
diff --git a/.cursor/commands/release-new.md b/.cursor/commands/release-new.md
index 744781fda..915667a0b 100644
--- a/.cursor/commands/release-new.md
+++ b/.cursor/commands/release-new.md
@@ -1,2 +1,2 @@
-Create a new release file for the given version and automatic generate the release notes, check for changes from the last release
+Create a new release file in the releases directory (Documents/RELEASE_v{version}.md) for the given version and automatically generate the release notes; check for changes since the last release
Keep the release file User-friendly, short, concise and clear.
diff --git a/.cursor/commands/summary.md b/.cursor/commands/summary.md
new file mode 100644
index 000000000..a25276937
--- /dev/null
+++ b/.cursor/commands/summary.md
@@ -0,0 +1,3 @@
+# summary
+
+Summarize the last changes and what was done; also check the changed code to get a better picture.
diff --git a/.cursor/commands/translate.md b/.cursor/commands/translate.md
new file mode 100644
index 000000000..eee14875d
--- /dev/null
+++ b/.cursor/commands/translate.md
@@ -0,0 +1,7 @@
+Do we need to add missing i18n translation keys for all locales?
+
+Check codebase and last few changes.
+
+Add a TODO for hu-HU translations with english placeholder, if needed.
+
+Use dev/scripts/check_translation_keys.py to check for internally missing translation keys for all locales. (If keys are missing in the code, add them to the locale files.)
\ No newline at end of file
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 000000000..7a42bf6f2
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,50 @@
+name: "CodeQL Advanced"
+
+on:
+ push:
+ branches: ["main"]
+ pull_request:
+ branches: ["main"]
+ schedule:
+ - cron: "42 6 * * 0"
+
+jobs:
+ analyze:
+ name: Analyze (${{ matrix.language }})
+ runs-on: ubuntu-latest
+ timeout-minutes: 360
+ permissions:
+ # required for all workflows
+ security-events: write
+
+ # required to fetch internal or private CodeQL packs
+ packages: read
+
+ # only required for workflows in private repositories
+ actions: read
+ contents: read
+
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - language: rust
+ build-mode: none
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v4
+ with:
+ languages: ${{ matrix.language }}
+ build-mode: ${{ matrix.build-mode }}
+ # Use security and quality queries for comprehensive analysis
+ queries: security-and-quality
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v4
+ with:
+ category: "/language:${{matrix.language}}"
diff --git a/Cargo.lock b/Cargo.lock
index 3f2a744ab..eb360dae8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -133,12 +133,6 @@ version = "3.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
-[[package]]
-name = "byteorder"
-version = "1.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
-
[[package]]
name = "bytes"
version = "1.11.0"
@@ -162,9 +156,9 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.2.47"
+version = "1.2.49"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd405d82c84ff7f35739f175f67d8b9fb7687a0e84ccdc78bd3568839827cf07"
+checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215"
dependencies = [
"find-msvc-tools",
"shlex",
@@ -257,9 +251,9 @@ dependencies = [
[[package]]
name = "convert_case"
-version = "0.7.1"
+version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7"
+checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
dependencies = [
"unicode-segmentation",
]
@@ -349,9 +343,9 @@ dependencies = [
[[package]]
name = "cssparser"
-version = "0.35.0"
+version = "0.36.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e901edd733a1472f944a45116df3f846f54d37e67e68640ac8bb69689aca2aa"
+checksum = "dae61cf9c0abb83bd659dab65b7e4e38d8236824c85f0f804f173567bda257d2"
dependencies = [
"cssparser-macros",
"dtoa-short",
@@ -416,22 +410,23 @@ dependencies = [
[[package]]
name = "derive_more"
-version = "2.0.1"
+version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678"
+checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618"
dependencies = [
"derive_more-impl",
]
[[package]]
name = "derive_more-impl"
-version = "2.0.1"
+version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3"
+checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b"
dependencies = [
"convert_case",
"proc-macro2",
"quote",
+ "rustc_version",
"syn",
]
@@ -702,15 +697,6 @@ dependencies = [
"thread_local",
]
-[[package]]
-name = "fxhash"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
-dependencies = [
- "byteorder",
-]
-
[[package]]
name = "getopts"
version = "0.2.24"
@@ -792,23 +778,21 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "html5ever"
-version = "0.35.0"
+version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55d958c2f74b664487a2035fe1dadb032c48718a03b63f3ab0b8537db8549ed4"
+checksum = "6452c4751a24e1b99c3260d505eaeee76a050573e61f30ac2c924ddc7236f01e"
dependencies = [
"log",
"markup5ever",
- "match_token",
]
[[package]]
name = "http"
-version = "1.3.1"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565"
+checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a"
dependencies = [
"bytes",
- "fnv",
"itoa",
]
@@ -897,9 +881,9 @@ dependencies = [
[[package]]
name = "hyper-util"
-version = "0.1.18"
+version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56"
+checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f"
dependencies = [
"base64",
"bytes",
@@ -993,9 +977,9 @@ checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"
[[package]]
name = "icu_properties"
-version = "2.1.1"
+version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99"
+checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec"
dependencies = [
"icu_collections",
"icu_locale_core",
@@ -1007,9 +991,9 @@ dependencies = [
[[package]]
name = "icu_properties_data"
-version = "2.1.1"
+version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899"
+checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af"
[[package]]
name = "icu_provider"
@@ -1074,9 +1058,9 @@ dependencies = [
[[package]]
name = "instability"
-version = "0.3.9"
+version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "435d80800b936787d62688c927b6490e887c7ef5ff9ce922c6c6050fca75eb9a"
+checksum = "6778b0196eefee7df739db78758e5cf9b37412268bfa5650bfeed028aed20d9c"
dependencies = [
"darling",
"indoc",
@@ -1124,9 +1108,9 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "js-sys"
-version = "0.3.82"
+version = "0.3.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65"
+checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8"
dependencies = [
"once_cell",
"wasm-bindgen",
@@ -1140,9 +1124,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
-version = "0.2.177"
+version = "0.2.178"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976"
+checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091"
[[package]]
name = "linked-hash-map"
@@ -1185,9 +1169,9 @@ dependencies = [
[[package]]
name = "log"
-version = "0.4.28"
+version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"
+checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "lru"
@@ -1215,26 +1199,15 @@ checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4"
[[package]]
name = "markup5ever"
-version = "0.35.0"
+version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "311fe69c934650f8f19652b3946075f0fc41ad8757dbb68f1ca14e7900ecc1c3"
+checksum = "6c3294c4d74d0742910f8c7b466f44dda9eb2d5742c1e430138df290a1e8451c"
dependencies = [
"log",
"tendril",
"web_atoms",
]
-[[package]]
-name = "match_token"
-version = "0.35.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac84fd3f360fcc43dc5f5d186f02a94192761a080e8bc58621ad4d12296a58cf"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
[[package]]
name = "matchers"
version = "0.2.0"
@@ -1268,9 +1241,9 @@ dependencies = [
[[package]]
name = "mio"
-version = "1.1.0"
+version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873"
+checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
dependencies = [
"libc",
"log",
@@ -1422,10 +1395,12 @@ dependencies = [
"chrono",
"clap",
"crossterm 0.29.0",
+ "ego-tree",
"futures",
"fuzzy-matcher",
"lru 0.16.2",
"portable-pty",
+ "rand",
"ratatui",
"reqwest",
"rpassword",
@@ -1482,19 +1457,20 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "phf"
-version = "0.11.3"
+version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078"
+checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf"
dependencies = [
"phf_macros",
"phf_shared",
+ "serde",
]
[[package]]
name = "phf_codegen"
-version = "0.11.3"
+version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a"
+checksum = "49aa7f9d80421bca176ca8dbfebe668cc7a2684708594ec9f3c0db0805d5d6e1"
dependencies = [
"phf_generator",
"phf_shared",
@@ -1502,19 +1478,19 @@ dependencies = [
[[package]]
name = "phf_generator"
-version = "0.11.3"
+version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d"
+checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737"
dependencies = [
+ "fastrand",
"phf_shared",
- "rand",
]
[[package]]
name = "phf_macros"
-version = "0.11.3"
+version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216"
+checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef"
dependencies = [
"phf_generator",
"phf_shared",
@@ -1525,9 +1501,9 @@ dependencies = [
[[package]]
name = "phf_shared"
-version = "0.11.3"
+version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5"
+checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266"
dependencies = [
"siphasher",
]
@@ -1599,6 +1575,15 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
+[[package]]
+name = "ppv-lite86"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
+dependencies = [
+ "zerocopy",
+]
+
[[package]]
name = "precomputed-hash"
version = "0.1.1"
@@ -1640,18 +1625,32 @@ checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "rand"
-version = "0.8.5"
+version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
dependencies = [
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
+dependencies = [
+ "ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
-version = "0.6.4"
+version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
+dependencies = [
+ "getrandom 0.3.4",
+]
[[package]]
name = "ratatui"
@@ -1702,9 +1701,9 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"
[[package]]
name = "reqwest"
-version = "0.12.24"
+version = "0.12.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f"
+checksum = "b6eff9328d40131d43bd911d42d79eb6a47312002a4daefc9e37f17e74a7701a"
dependencies = [
"base64",
"bytes",
@@ -1775,6 +1774,21 @@ dependencies = [
"windows-sys 0.52.0",
]
+[[package]]
+name = "rustc-hash"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
+
+[[package]]
+name = "rustc_version"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
+dependencies = [
+ "semver",
+]
+
[[package]]
name = "rustix"
version = "0.38.44"
@@ -1816,9 +1830,9 @@ dependencies = [
[[package]]
name = "rustls-pki-types"
-version = "1.13.0"
+version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a"
+checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c"
dependencies = [
"zeroize",
]
@@ -1872,9 +1886,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "scraper"
-version = "0.24.0"
+version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5f3a24d916e78954af99281a455168d4a9515d65eca99a18da1b813689c4ad9"
+checksum = "93cecd86d6259499c844440546d02f55f3e17bd286e529e48d1f9f67e92315cb"
dependencies = [
"cssparser",
"ego-tree",
@@ -1910,23 +1924,29 @@ dependencies = [
[[package]]
name = "selectors"
-version = "0.31.0"
+version = "0.33.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5685b6ae43bfcf7d2e7dfcfb5d8e8f61b46442c902531e41a32a9a8bf0ee0fb6"
+checksum = "feef350c36147532e1b79ea5c1f3791373e61cbd9a6a2615413b3807bb164fb7"
dependencies = [
"bitflags 2.10.0",
"cssparser",
"derive_more",
- "fxhash",
"log",
"new_debug_unreachable",
"phf",
"phf_codegen",
"precomputed-hash",
+ "rustc-hash",
"servo_arc",
"smallvec",
]
+[[package]]
+name = "semver"
+version = "1.0.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
+
[[package]]
name = "serde"
version = "1.0.228"
@@ -2036,9 +2056,9 @@ dependencies = [
[[package]]
name = "shell-words"
-version = "1.1.0"
+version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde"
+checksum = "dc6fe69c597f9c37bfeeeeeb33da3530379845f10be461a66d16d03eca2ded77"
[[package]]
name = "shlex"
@@ -2078,9 +2098,9 @@ dependencies = [
[[package]]
name = "simd-adler32"
-version = "0.3.7"
+version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
+checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2"
[[package]]
name = "siphasher"
@@ -2124,9 +2144,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "string_cache"
-version = "0.8.9"
+version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f"
+checksum = "a18596f8c785a729f2819c0f6a7eae6ebeebdfffbfe4214ae6b087f690e31901"
dependencies = [
"new_debug_unreachable",
"parking_lot",
@@ -2137,9 +2157,9 @@ dependencies = [
[[package]]
name = "string_cache_codegen"
-version = "0.5.4"
+version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c711928715f1fe0fe509c53b43e993a9a557babc2d0a3567d0a3006f1ac931a0"
+checksum = "585635e46db231059f76c5849798146164652513eb9e8ab2685939dd90f29b69"
dependencies = [
"phf_generator",
"phf_shared",
@@ -2453,9 +2473,9 @@ dependencies = [
[[package]]
name = "tower-http"
-version = "0.6.6"
+version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
+checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8"
dependencies = [
"bitflags 2.10.0",
"bytes",
@@ -2695,9 +2715,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen"
-version = "0.2.105"
+version = "0.2.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60"
+checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd"
dependencies = [
"cfg-if",
"once_cell",
@@ -2708,9 +2728,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-futures"
-version = "0.4.55"
+version = "0.4.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0"
+checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c"
dependencies = [
"cfg-if",
"js-sys",
@@ -2721,9 +2741,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.105"
+version = "0.2.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2"
+checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -2731,9 +2751,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.105"
+version = "0.2.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc"
+checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40"
dependencies = [
"bumpalo",
"proc-macro2",
@@ -2744,18 +2764,18 @@ dependencies = [
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.105"
+version = "0.2.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76"
+checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4"
dependencies = [
"unicode-ident",
]
[[package]]
name = "web-sys"
-version = "0.3.82"
+version = "0.3.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1"
+checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -2763,9 +2783,9 @@ dependencies = [
[[package]]
name = "web_atoms"
-version = "0.1.3"
+version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57ffde1dc01240bdf9992e3205668b235e59421fd085e8a317ed98da0178d414"
+checksum = "acd0c322f146d0f8aad130ce6c187953889359584497dac6561204c8e17bb43d"
dependencies = [
"phf",
"phf_codegen",
@@ -3109,6 +3129,26 @@ dependencies = [
"synstructure",
]
+[[package]]
+name = "zerocopy"
+version = "0.8.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3"
+dependencies = [
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.8.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
[[package]]
name = "zerofrom"
version = "0.1.6"
diff --git a/Cargo.toml b/Cargo.toml
index f12e9e8ac..ca9edb79d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -43,11 +43,13 @@ futures = "0.3"
syntect = "5.0"
fuzzy-matcher = "0.3"
rpassword = "7.4.0"
-scraper = "0.24.0"
+scraper = "0.25.0"
chrono = "0.4"
portable-pty = "0.9"
strip-ansi-escapes = "0.2"
lru = "0.16.2"
+ego-tree = "0.10"
+rand = "0.9.2"
[dev-dependencies]
tempfile = "3.10"
@@ -59,6 +61,10 @@ cognitive_complexity = "warn"
pedantic = { level = "deny", priority = -1 }
nursery = { level = "deny", priority = -1 }
unwrap_used = "deny"
+# Keep docs linting consistent with clippy naming
+missing_docs_in_private_items = "warn"
+[lints.rust]
+missing_docs = "warn"
# Ignored tests run with:
# cargo test -- --ignored
\ No newline at end of file
diff --git a/PKGBUILD-bin b/PKGBUILD-bin
index 21e16ad76..c2c72b49c 100644
--- a/PKGBUILD-bin
+++ b/PKGBUILD-bin
@@ -37,8 +37,9 @@ optdepends=(
'semgrep-bin: static analysis checks'
'shellcheck: lint shell scripts'
'downgrade: Downgrade of Packages'
- 'pacman-contrib: Update Check'
+ 'pacman-contrib: Used as a fallback for update checking'
)
+
provides=("pacsea=${pkgver}")
conflicts=('pacsea' 'pacsea-git')
source=("Pacsea::https://github.com/Firstp1ck/Pacsea/releases/download/${_tag}/Pacsea"
diff --git a/PKGBUILD-git b/PKGBUILD-git
index 1d7b44cf6..352f050f4 100644
--- a/PKGBUILD-git
+++ b/PKGBUILD-git
@@ -35,8 +35,9 @@ optdepends=(
'semgrep-bin: static analysis checks'
'shellcheck: lint shell scripts'
'downgrade: Downgrade of Packages'
- 'pacman-contrib: Update Check'
+ 'pacman-contrib: Used as a fallback for update checking'
)
+
makedepends=('cargo' 'git')
conflicts=('pacsea' 'pacsea-bin')
provides=('pacsea')
diff --git a/README.md b/README.md
index b7d244f62..4cde88794 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
[](https://www.rust-lang.org/)
[](https://archlinux.org/)
-Pacsea is a fast, friendly TUI for browsing and installing Arch and AUR packages — built for speed and minimal keystrokes.
+Pacsea is a fast, friendly TUI for browsing and installing Arch and AUR packages — plus a built-in Arch news and advisory feed — built for speed and minimal keystrokes.
## Community
@@ -64,6 +64,7 @@ pacsea
| Feature | Description |
|---------|-------------|
| **Integrated Process Execution** | All operations execute directly within the TUI with real-time output streaming, progress bars, and inline password prompts — no external terminals |
+| **News feed & advisories** | News mode shows Arch news and security advisories with filters (Arch/advisory/installed-only, age), search with history, read/unread tracking, bookmarks, and cached article content |
| **Security Scan for AUR Packages** | Comprehensive security scanning workflow with multiple tools (ClamAV, Trivy, Semgrep, ShellCheck, VirusTotal, custom patterns, aur-sleuth) and detailed scan summaries |
| **Fuzzy Search** | Toggle flexible fuzzy search mode to find packages even without exact names |
| **Unified search** | Fast results across official repos and the AUR |
@@ -121,6 +122,7 @@ Pacsea provides a keyboard-first interface for searching, queueing, and installi
- Review packages before installing with the Preflight modal
- Run security scans for AUR packages
- Manage installed packages, including removal and downgrade
+- Switch to News mode (Options → News) to browse Arch news and security advisories, filter by source/age/installed-only, search with history, and bookmark/read items with cached article content
- All operations execute directly in the TUI with real-time output and progress indicators
For a complete reference of all keyboard shortcuts, see the [Keyboard Shortcuts](https://github.com/Firstp1ck/Pacsea/wiki/Keyboard-Shortcuts) wiki page.
@@ -148,6 +150,8 @@ For complete configuration documentation, including all available settings, them
Example configuration files are available in the [`config/`](config/) directory.
+News defaults to 30-day items and shows Arch news plus security advisories; adjust via `app_start_mode` (`package` or `news`), `news_filter_show_arch_news`, `news_filter_show_advisories`, `news_filter_installed_only`, and `news_max_age_days` in `settings.conf`.
+
")
### Preflight Modal
diff --git a/config/keybinds.conf b/config/keybinds.conf
index b2ff4eec8..0598986f4 100644
--- a/config/keybinds.conf
+++ b/config/keybinds.conf
@@ -97,3 +97,6 @@ keybind_install_focus_left = Left
# NEWS — Actions
keybind_news_mark_read = r
keybind_news_mark_all_read = CTRL+R
+keybind_news_feed_mark_read = r
+keybind_news_feed_mark_unread = u
+keybind_news_feed_toggle_read = t
diff --git a/config/locales/de-DE.yml b/config/locales/de-DE.yml
index 9a9c34427..d01e677fd 100644
--- a/config/locales/de-DE.yml
+++ b/config/locales/de-DE.yml
@@ -61,6 +61,10 @@ de-DE:
search_focused: "Suche (fokussiert)"
recent: "Suchverlauf:"
recent_focused: "Suchverlauf (fokussiert)"
+ news_recent: "News-Suchverlauf:"
+ news_recent_focused: "News-Suchverlauf (fokussiert)"
+ news_bookmarks: "Lesezeichen"
+ news_bookmarks_focused: "Lesezeichen (fokussiert)"
install_list: "Installationsliste"
install_list_focused: "Installationsliste (fokussiert)"
downgrade_list: "Herabstufungs-Liste"
@@ -93,10 +97,10 @@ de-DE:
aur_maintenance_ongoing: "AUR-Wartung läuft"
aur_issues_detected: "AUR-Probleme erkannt (Status ansehen)"
aur_degraded: "AUR beeinträchtigt (Status ansehen)"
- service_outage: "{} Ausfall (Status ansehen) — {} heute: {:.1}%"
- service_degraded: "{} beeinträchtigt (Status ansehen) — {} heute: {:.1}%"
- service_issues_detected: "{} Probleme erkannt (Status ansehen) — {} heute: {:.1}%"
- systems_nominal_with_service: "Arch-Systeme normal — {} heute: {:.1}%"
+ service_outage: "{} Ausfall (Status ansehen) — {} heute: {}%"
+ service_degraded: "{} beeinträchtigt (Status ansehen) — {} heute: {}%"
+ service_issues_detected: "{} Probleme erkannt (Status ansehen) — {} heute: {}%"
+ systems_nominal_with_service: "Arch-Systeme normal — {} heute: {}%"
aur_today_suffix: " — AUR heute: {}%"
labels:
@@ -145,6 +149,10 @@ de-DE:
loading: "Prüfe Updates..."
locked: "🔒 Gesperrt"
locked_with_time: "🔒 Gesperrt: {} Min"
+ news_button:
+ ready: "News bereit"
+ none: "Keine News verfügbar"
+ loading: "Lade News..."
actions:
exit: "Beenden"
@@ -225,6 +233,7 @@ de-DE:
show_pkgbuild: "PKGBUILD anzeigen"
hide_pkgbuild: "PKGBUILD verbergen"
url_label: "URL:"
+ open_url_label: "[Im Browser öffnen]"
loading_pkgb: "PKGBUILD wird geladen…"
copy_pkgbuild: "PKGBUILD kopieren"
reload_pkgbuild: "PKGBUILD neu laden"
@@ -300,12 +309,29 @@ de-DE:
hide_install_list: "Installationsliste verbergen"
show_keybinds: "Tastenkombinationen anzeigen"
hide_keybinds: "Tastenkombinationen verbergen"
+ show_history: "History (TODO: translate to german)"
+ hide_history: "History (TODO: translate to german)"
+ show_bookmarks: "Bookmarks (TODO: translate to german)"
+ hide_bookmarks: "Bookmarks (TODO: translate to german)"
options_menu:
list_all_packages: "Alle Pakete auflisten"
list_installed_packages: "Installierte Pakete auflisten"
update_system: "System aktualisieren"
news: "Neuigkeiten"
+ news_management: "News-Verwaltung"
+ package_mode: "Paketmodus"
+ news_age_days: "News-Alter: {} Tage"
+ news_age_all: "News-Alter: unbegrenzt"
tui_optional_deps: "TUI Optionale Abhängigkeiten"
+ news:
+ filters:
+ arch: "Arch"
+ advisories: "Advisories"
+ installed_only: "Installed only"
+ loading:
+ news: "Lade Neuigkeiten..."
+ news_first_load_hint: "(Erstes Laden kann aufgrund von Rate-Limiting einige Minuten dauern)"
+ news_pkg_impact_hint: "Paketdetails von archlinux.org können während des Ladens langsamer sein."
modals:
common:
@@ -315,10 +341,12 @@ de-DE:
header_clipboard_copy: "Zwischenablage kopieren"
header_connection_issue: "Verbindungsproblem"
header_account_locked: "Konto gesperrt"
+ header_config_directories: "Konfigurationsverzeichnisse"
title_configuration_error: " Konfigurationsfehler "
title_clipboard_copy: " Zwischenablage kopieren "
title_connection_issue: " Verbindungsproblem "
title_account_locked: " Konto gesperrt "
+ title_config_directories: " Konfigurationsverzeichnisse "
account_locked: "Ihr Konto ist jetzt gesperrt. Bitte entsperren Sie es, indem Sie sich als root mit `su` anmelden und `faillock --user {} --reset` ausführen."
account_locked_with_time: "Ihr Konto ist jetzt gesperrt. Bitte entsperren Sie es, indem Sie sich als root mit `su` anmelden und `faillock --user {} --reset` ausführen. Sperrdauer: {} Minuten verbleibend."
password_prompt:
@@ -453,6 +481,16 @@ de-DE:
heading: "Arch Linux Neuigkeiten"
none: "Keine Neuigkeiten verfügbar."
footer_hint: "Hoch/Runter: auswählen • Enter: öffnen • {}: als gelesen markieren • {}: alle als gelesen markieren • Esc: schließen"
+ keybinds_hint: "Hoch/Runter/K/J: navigieren • Enter: öffnen • {}: als gelesen markieren • {}: alle als gelesen markieren • Esc/Q: schließen"
+ news_setup:
+ title: "News-Popup-Einrichtung"
+ arch_news: "Arch News"
+ advisories: "Advisories - Offizielle Sicherheitsnachrichten"
+ aur_updates: "AUR Updates"
+ aur_comments: "AUR Kommentare"
+ pkg_updates: "Offizielle Paket-Updates"
+ date_selection: "Maximales Alter"
+ footer_hint: "Hoch/Runter: auswählen • Leertaste: umschalten • Enter: speichern • Esc: abbrechen"
announcement:
footer_hint: "r: Als gelesen markieren | Enter/Esc: Schließen (erneut anzeigen)"
updates_window:
diff --git a/config/locales/en-US.yml b/config/locales/en-US.yml
index 0c310b13e..efe208a1c 100644
--- a/config/locales/en-US.yml
+++ b/config/locales/en-US.yml
@@ -61,6 +61,10 @@ en-US:
search_focused: "Search (focused)"
recent: "Search history"
recent_focused: "Search history (focused)"
+ news_recent: "News search history"
+ news_recent_focused: "News search history (focused)"
+ news_bookmarks: "Bookmarks"
+ news_bookmarks_focused: "Bookmarks (focused)"
install_list: "Install List"
install_list_focused: "Install List (focused)"
downgrade_list: "Downgrade List"
@@ -93,10 +97,10 @@ en-US:
aur_maintenance_ongoing: "AUR maintenance ongoing"
aur_issues_detected: "AUR issues detected (see status)"
aur_degraded: "AUR degraded (see status)"
- service_outage: "{} outage (see status) — {} today: {:.1}%"
- service_degraded: "{} degraded (see status) — {} today: {:.1}%"
- service_issues_detected: "{} issues detected (see status) — {} today: {:.1}%"
- systems_nominal_with_service: "Arch systems nominal — {} today: {:.1}%"
+ service_outage: "{} outage (see status) — {} today: {}%"
+ service_degraded: "{} degraded (see status) — {} today: {}%"
+ service_issues_detected: "{} issues detected (see status) — {} today: {}%"
+ systems_nominal_with_service: "Arch systems nominal — {} today: {}%"
aur_today_suffix: " — AUR today: {}%"
labels:
@@ -145,6 +149,10 @@ en-US:
loading: "Checking updates..."
locked: "🔒 Locked"
locked_with_time: "🔒 Locked: {} min"
+ news_button:
+ ready: "News Ready"
+ none: "No News available"
+ loading: "Loading news..."
actions:
exit: "Exit"
@@ -225,6 +233,7 @@ en-US:
show_pkgbuild: "Show PKGBUILD"
hide_pkgbuild: "Hide PKGBUILD"
url_label: "URL:"
+ open_url_label: "[Open in Browser]"
loading_pkgb: "Loading PKGBUILD…"
copy_pkgbuild: "Copy PKGBUILD"
reload_pkgbuild: "Reload PKGBUILD"
@@ -300,12 +309,29 @@ en-US:
hide_install_list: "Hide Install List"
show_keybinds: "Show Keybinds"
hide_keybinds: "Hide Keybinds"
+ show_history: "Show History"
+ hide_history: "Hide History"
+ show_bookmarks: "Show Bookmarks"
+ hide_bookmarks: "Hide Bookmarks"
options_menu:
list_all_packages: "List all packages"
list_installed_packages: "List installed packages"
update_system: "Update System"
news: "News"
+ news_management: "News management"
+ package_mode: "Package mode"
+ news_age_days: "News age: {} days"
+ news_age_all: "News age: all time"
tui_optional_deps: "TUI Optional Dep's"
+ news:
+ filters:
+ arch: "Arch"
+ advisories: "Advisories"
+ installed_only: "Installed only"
+ loading:
+ news: "Loading news..."
+ news_first_load_hint: "(First load may take a few minutes due to rate limiting)"
+ news_pkg_impact_hint: "Package details from archlinux.org may be slower during loading."
modals:
common:
@@ -315,10 +341,12 @@ en-US:
header_clipboard_copy: "Clipboard Copy"
header_connection_issue: "Connection issue"
header_account_locked: "Account Locked"
+ header_config_directories: "Configuration Directories"
title_configuration_error: " Configuration Error "
title_clipboard_copy: " Clipboard Copy "
title_connection_issue: " Connection issue "
title_account_locked: " Account Locked "
+ title_config_directories: " Configuration Directories "
account_locked: "Your account is now locked. Please unlock by logging in as root with `su` and running `faillock --user {} --reset`."
account_locked_with_time: "Your account is now locked. Please unlock by logging in as root with `su` and running `faillock --user {} --reset`. Lockout duration: {} minutes remaining."
password_prompt:
@@ -452,7 +480,17 @@ en-US:
title: " News "
heading: "Arch Linux News"
none: "No news items available."
- footer_hint: "Up/Down: select • Enter: open • {}: mark read • {}: mark all read • Esc: close"
+ footer_hint: "Up/Down: select • Enter: open • {}: mark read • {}: mark all read • Esc/Q: close"
+ keybinds_hint: "Up/Down/K/J: navigate • Enter: open • {}: mark read • {}: mark all read • Esc/Q: close"
+ news_setup:
+ title: "News Popup Setup"
+ arch_news: "Arch News"
+ advisories: "Advisories - Official Security News"
+ aur_updates: "AUR Updates"
+ aur_comments: "AUR Comments"
+ pkg_updates: "Official Package Updates"
+ date_selection: "Maximum age"
+ footer_hint: "Up/Down: select • Space: toggle • Enter: save • Esc: cancel"
announcement:
footer_hint: "r: Mark as read | Enter/Esc: Dismiss (show again)"
updates_window:
diff --git a/config/locales/hu-HU.yml b/config/locales/hu-HU.yml
index 852006ef1..1aeaab476 100644
--- a/config/locales/hu-HU.yml
+++ b/config/locales/hu-HU.yml
@@ -64,6 +64,12 @@ hu-HU:
search_focused: "Keresés (fókuszált)"
recent: "Keresési előzmények:"
recent_focused: "Keresési előzmények (fókuszált)"
+ # TODO: translate to hungarian
+ news_recent: "News search history"
+ news_recent_focused: "News search history (focused)"
+ # TODO: translate to hungarian
+ news_bookmarks: "Bookmarks"
+ news_bookmarks_focused: "Bookmarks (focused)"
install_list: "Telepítési lista"
install_list_focused: "Telepítési lista (fókuszált)"
downgrade_list: "Visszafejlesztési lista"
@@ -96,10 +102,10 @@ hu-HU:
aur_maintenance_ongoing: "Az AUR karbantartása folyamatban van"
aur_issues_detected: "AUR-problémák felismerve (tekintse meg az állapotot)"
aur_degraded: "Az AUR teljesítménye csökkent (tekintse meg az állapotot)"
- service_outage: "{} üzemzavar (tekintse meg az állapotot) — {} ma: {:.1}%"
- service_degraded: "A(z) {} teljesítménye csökkent (tekintse meg az állapotot) — {} ma: {:.1}%"
- service_issues_detected: "{} problémák felismerve (tekintse meg az állapotot) — {} ma: {:.1}%"
- systems_nominal_with_service: "Az Arch rendszereinek állapota megfelelő — {} ma: {:.1}%"
+ service_outage: "{} üzemzavar (tekintse meg az állapotot) — {} ma: {}%"
+ service_degraded: "A(z) {} teljesítménye csökkent (tekintse meg az állapotot) — {} ma: {}%"
+ service_issues_detected: "{} problémák felismerve (tekintse meg az állapotot) — {} ma: {}%"
+ systems_nominal_with_service: "Az Arch rendszereinek állapota megfelelő — {} ma: {}%"
aur_today_suffix: " — AUR ma: {}%"
labels:
@@ -148,6 +154,10 @@ hu-HU:
loading: "Frissítések ellenőrzése…"
locked: "🔒 Zárolt"
locked_with_time: "🔒 Zárolás {} perc múlva"
+ news_button:
+ ready: "News Ready" # TODO: translate to hungarian
+ none: "No News available" # TODO: translate to hungarian
+ loading: "Loading news..." # TODO: translate to hungarian
actions:
exit: "Kilépés"
@@ -228,6 +238,8 @@ hu-HU:
show_pkgbuild: "PKGBUILD megjelenítése"
hide_pkgbuild: "PKGBUILD elrejtése"
url_label: "Webcím:"
+ # TODO: translate to hungarian
+ open_url_label: "[Open in Browser]"
loading_pkgb: "PKGBUILD betöltése…"
copy_pkgbuild: "PKGBUILD másolása"
reload_pkgbuild: "PKGBUILD újratöltése"
@@ -301,14 +313,35 @@ hu-HU:
hide_recent: "Legutóbbi elrejtése"
show_install_list: "Telepítési lista megjelenítése"
hide_install_list: "Telepítési lista elrejtése"
- show_keybinds: "Billentyűparancsok megjelenítése"
- hide_keybinds: "Billentyűparancsok elrejtése"
+ show_keybinds: "Billentyűkötések megjelenítése"
+ hide_keybinds: "Billentyűkötések elrejtése"
+ # TODO: translate to hungarian
+      show_history: "Show History"
+      hide_history: "Hide History"
+      show_bookmarks: "Show Bookmarks"
+      hide_bookmarks: "Hide Bookmarks"
options_menu:
list_all_packages: "Összes csomag listázása"
list_installed_packages: "Telepített csomagok listázása"
update_system: "Rendszer frissítése"
news: "Hírek"
+ # TODO: translate to hungarian
+ news_management: "News management"
+ package_mode: "Package mode"
+ news_age_days: "News age: {} days"
+ news_age_all: "News age: all time"
tui_optional_deps: "A PacSea nem kötelező függőségei"
+ news:
+ filters:
+ # TODO: translate to hungarian
+ arch: "Arch"
+ advisories: "Advisories"
+ installed_only: "Installed only"
+ loading:
+ # TODO: translate to hungarian
+ news: "Loading news..."
+ news_first_load_hint: "(First load may take a few minutes due to rate limiting)"
+ news_pkg_impact_hint: "Package details from archlinux.org may be slower during loading."
modals:
common:
@@ -318,10 +351,14 @@ hu-HU:
header_clipboard_copy: "Másolás a vágólapra"
header_connection_issue: "Kapcsolati probléma"
header_account_locked: "Zárolt fiók"
+ # TODO: translate to hungarian
+ header_config_directories: "Configuration Directories"
title_configuration_error: " Konfigurációs hiba "
title_clipboard_copy: " Másolás a vágólapra "
title_connection_issue: " Kapcsolati probléma "
title_account_locked: " Zárolt fiók "
+ # TODO: translate to hungarian
+ title_config_directories: " Configuration Directories "
account_locked: "Fiókja jelenleg zárolva van. A zárolás feloldásához jelentkezzen be rendszergazdaként a „su” paranccsal, és futtassa a „faillock --user {} --reset” parancsot."
account_locked_with_time: "Fiókja jelenleg zárolva van. A zárolás feloldásához jelentkezzen be rendszergazdaként a „su” paranccsal, és futtassa a „faillock --user {} --reset” parancsot. A fiók zárolásáig {} perc van hátra."
password_prompt:
@@ -453,9 +490,22 @@ hu-HU:
updates: " Elérhető frissítések megnyitása"
news:
title: " Hírek "
- heading: "Arch Linux hírek"
- none: "Nincsenek elérhető hírek."
+      heading: "Arch Linux hírek"
+      none: "Nincsenek elérhető hírek."
+      # NOTE(review): restored the existing Hungarian translations instead of reverting them to English
+ # TODO: translate to hungarian
+ keybinds_hint: "Up/Down/K/J: navigate • Enter: open • {}: mark read • {}: mark all read • Esc/Q: close"
footer_hint: "Fel/Le: kiválasztás • Enter: megnyitás • {}: megjelölés olvasottként • {}: összes megjelölése olvasottként • Esc: bezárás"
+ news_setup:
+ # TODO: translate to hungarian
+ title: "News Popup Setup"
+ arch_news: "Arch News"
+ advisories: "Advisories - Official Security News"
+ aur_updates: "AUR Updates"
+ aur_comments: "AUR Comments"
+ pkg_updates: "Official Package Updates"
+ date_selection: "Maximum age"
+ footer_hint: "Up/Down: select • Space: toggle • Enter: save • Esc: cancel"
announcement:
footer_hint: "r: Megjelölés olvasottként | Enter/Esc: Eltüntetés (megjelenítés újra)"
updates_window:
diff --git a/config/settings.conf b/config/settings.conf
index d1ed565d4..7c20ad4a9 100644
--- a/config/settings.conf
+++ b/config/settings.conf
@@ -135,6 +135,27 @@ scan_do_sleuth = true
# Symbols for read/unread indicators in the News popup
news_read_symbol = ✓
news_unread_symbol = ∘
+# News feed filters (toggle visibility of sources)
+news_filter_show_arch_news = true
+news_filter_show_advisories = true
+news_filter_show_pkg_updates = true
+news_filter_show_aur_updates = true
+news_filter_show_aur_comments = true
+# When news_filter_show_advisories is true, this restricts advisories to only those affecting installed packages
+news_filter_installed_only = false
+news_max_age_days = 30
+
+# Startup News Popup Configuration
+# Whether startup news popup setup has been completed
+startup_news_configured = false
+# News sources to show in startup popup
+startup_news_show_arch_news = true
+startup_news_show_advisories = true
+startup_news_show_aur_updates = true
+startup_news_show_aur_comments = true
+startup_news_show_pkg_updates = true
+# Maximum age of news items in days for startup popup (7, 30, or 90)
+startup_news_max_age_days = 7
# VirusTotal
# API key used for VirusTotal scans (optional)
diff --git a/dev/ANNOUNCEMENTS/announcement.json b/dev/ANNOUNCEMENTS/announcement.json
index 8b2de4e9e..4d603fef9 100644
--- a/dev/ANNOUNCEMENTS/announcement.json
+++ b/dev/ANNOUNCEMENTS/announcement.json
@@ -1,7 +1,7 @@
{
- "id": "2025-12-06-community-poll-ux-vs-features",
- "title": "Community Poll: UX Refinements or New Features?",
- "content": "I want your input for the next milestone. Should I focus on UX refinements or on shipping more features (see the README for the list)?\n\nVote in the poll here: https://github.com/Firstp1ck/Pacsea/discussions/83",
+ "id": "2025-12-20-nvidia-590-pascal-support",
+ "title": "Critical: NVIDIA 590 Driver Drops Pascal Support",
+ "content": "IMPORTANT: The NVIDIA 590 driver has discontinued support for Pascal architecture GPUs, and Arch Linux main packages have switched to Open Kernel Modules.\n\n**Impact:**\n- Pascal-based GPUs (GTX 10xx series, including GTX 1050, 1060, 1070, 1080, etc.) are no longer supported by NVIDIA 590 drivers\n- Users with Pascal GPUs who update to NVIDIA 590 will lose graphics functionality\n- Main NVIDIA packages in Arch Linux now use Open Kernel Modules\n\n**Resolution:**\n- Pascal GPU users must take action to maintain functionality:\n 1. Uninstall the official nvidia, nvidia-lts, or nvidia-dkms packages\n 2. Install nvidia-580xx-dkms from the AUR\n- Do NOT update to NVIDIA 590 if you have a Pascal-based GPU\n- For non-Pascal GPUs, replace packages with Open Kernel Modules versions:\n - Replace nvidia with nvidia-open\n - Replace nvidia-dkms with nvidia-open-dkms\n - Replace nvidia-lts with nvidia-lts-open\n\n**Why this matters for Pacsea users:**\nPacsea manages package updates, and this change affects NVIDIA driver packages. Users with Pascal GPUs need to be aware that updating NVIDIA drivers may break their graphics functionality. If you have a Pascal GPU, you'll need to switch to nvidia-580xx-dkms from the AUR instead of the official packages.\n\n**Source:** https://www.reddit.com/r/archlinux/comments/1prm8rl/archannounce_nvidia_590_driver_drops_pascal/\n\n**Note:** Check your GPU model before updating NVIDIA drivers. Pascal GPUs include the entire GTX 10xx series (1050, 1050 Ti, 1060, 1070, 1070 Ti, 1080, 1080 Ti).",
"min_version": "0.6.0",
"max_version": null,
"expires": null
diff --git a/dev/Demo.txt b/dev/Demo.txt
new file mode 100644
index 000000000..3d8348dca
--- /dev/null
+++ b/dev/Demo.txt
@@ -0,0 +1,42 @@
+Demo:
+- Start app
+
+Install Process Demo:
+- Add packages to install list
+- go to install list and start preflight
+- go through each tab and explain
+- proceed with 'p' for Reinstall prompt
+- continue to sudo prompt
+- install process
+
+Removal Demo:
+- go to Installed packages
+- add an unused package to the removal list (add another one)
+- Start removal process by opening preflight
+- Explain each tab
+- start removal process
+
+Scan Process
+- Add one AUR to install list
+- start preflight
+- start scan process with 's'
+- choose scan options
+- start scan process
+
+General usage:
+- Use filter
+- Use sorting
+- Use Fuzzy/Normal
+- Open Configs
+- Show/hide Panels
+- Open TUI Optional deps
+- Open News management
+
+System Update:
+- Open System update
+- Choose system update settings
+- choose countries
+- choose number of mirrors
+- Start System update
+
+
diff --git a/dev/IMPROVEMENTS/IMPLEMENTATION_PLAN_arch-toolkit.md b/dev/IMPROVEMENTS/IMPLEMENTATION_PLAN_arch-toolkit.md
new file mode 100644
index 000000000..a8b6cdab3
--- /dev/null
+++ b/dev/IMPROVEMENTS/IMPLEMENTATION_PLAN_arch-toolkit.md
@@ -0,0 +1,360 @@
+# Implementation Plan: Migrating Pacsea to Use arch-toolkit
+
+**Created:** 2025-01-XX
+**Status:** Planning
+**Target:** Replace custom AUR implementation with `arch-toolkit` crate
+
+## Overview
+
+This document outlines the plan to migrate Pacsea's AUR-related functionality to use the `arch-toolkit` crate, which is now available on crates.io. This migration will reduce code duplication, improve maintainability, and leverage the robust rate limiting and caching features provided by arch-toolkit.
+
+## Current State Analysis
+
+### What Pacsea Currently Implements
+
+1. **AUR Search** (`src/sources/search.rs`)
+ - Direct AUR RPC v5 API calls via `curl`
+ - Manual JSON parsing
+ - Manual error handling
+ - No built-in rate limiting
+ - No caching
+
+2. **AUR Comments** (`src/sources/comments.rs`)
+ - HTML scraping using `reqwest` + `scraper`
+ - Complex HTML parsing logic (~700 lines)
+ - Manual date parsing and timezone conversion
+ - Manual rate limiting (5s timeout)
+ - No caching
+
+3. **PKGBUILD Fetching** (`src/sources/pkgbuild.rs`, `src/logic/files/pkgbuild_fetch.rs`)
+ - AUR: Direct curl calls to AUR cgit
+ - Official: GitLab API calls
+ - Manual rate limiting with mutex (200ms/500ms intervals)
+ - Local cache checking (yay/paru caches)
+ - No network-level caching
+
+### What arch-toolkit Provides
+
+1. **AUR Search** (`ArchClient::aur().search()`)
+ - AUR RPC v5 API integration
+ - Automatic rate limiting (200ms minimum)
+ - Built-in retry policies with exponential backoff
+ - Optional caching (memory and disk)
+ - Proper error types
+
+2. **AUR Package Info** (`ArchClient::aur().info()`)
+ - Batch fetching for multiple packages
+ - Same rate limiting and caching as search
+
+3. **AUR Comments** (`ArchClient::aur().comments()`)
+ - HTML scraping with proper parsing
+ - Date parsing and sorting
+ - Pinned comment detection
+ - Rate limiting and caching support
+
+4. **PKGBUILD Fetching** (`ArchClient::aur().pkgbuild()`)
+ - AUR cgit integration
+ - Automatic rate limiting (200ms minimum)
+ - Caching support
+ - Proper error handling
+
+## Migration Strategy
+
+### Phase 1: Add Dependency and Setup Client
+
+**Files to Modify:**
+- `Cargo.toml` - Add `arch-toolkit` dependency
+- `src/sources/mod.rs` - Initialize `ArchClient` instance
+- `src/app/runtime/` - Pass `ArchClient` through runtime
+
+**Tasks:**
+1. Add `arch-toolkit = "0.1.0"` to `Cargo.toml` dependencies
+2. Create a shared `ArchClient` instance in the runtime
+3. Configure client with appropriate timeout and user agent
+4. Optionally enable caching if desired
+
+**Estimated Effort:** 1-2 hours
+
+### Phase 2: Replace AUR Search
+
+**Files to Modify:**
+- `src/sources/search.rs` - Replace `fetch_all_with_errors()` implementation
+- `src/logic/` - Update callers if needed
+
+**Current Implementation:**
+```rust
+pub async fn fetch_all_with_errors(query: String) -> (Vec<PackageItem>, Vec<String>)
+```
+
+**New Implementation:**
+- Use `client.aur().search(&query).await`
+- Map `AurPackage` to `PackageItem`:
+ - Most fields map directly (name, version, description, popularity, out_of_date, orphaned)
+ - Set `source: Source::Aur` explicitly
+- Convert errors to `Vec<String>` format (for backward compatibility)
+
+**Considerations:**
+- arch-toolkit returns `Result<Vec<AurPackage>>`, not `(Vec<PackageItem>, Vec<String>)`
+- `AurPackage` already has `orphaned: bool` field - no need to derive from maintainer!
+- Error handling needs to be converted to string format
+- May want to keep error tuple format for backward compatibility initially
+
+**Estimated Effort:** 2-3 hours
+
+### Phase 3: Replace AUR Comments
+
+**Files to Modify:**
+- `src/sources/comments.rs` - Replace `fetch_aur_comments()` implementation
+- `src/state/types.rs` - Check if `AurComment` types are compatible
+
+**Current Implementation:**
+- ~700 lines of HTML parsing, date parsing, timezone conversion
+- Complex pinned comment detection
+- Markdown conversion
+
+**New Implementation:**
+- Use `client.aur().comments(&pkgname).await`
+- Check if `arch_toolkit::AurComment` matches `pacsea::state::types::AurComment`
+- If types differ, create conversion function
+- May need to keep some formatting logic if arch-toolkit's output differs
+
+**Considerations:**
+- ✅ `AurComment` types are identical - no conversion needed!
+- Date format should be compatible (both use same parsing logic)
+- Markdown rendering is handled by arch-toolkit (same approach as Pacsea)
+- Can remove ~600+ lines of HTML parsing code
+
+**Estimated Effort:** 3-4 hours
+
+### Phase 4: Replace AUR PKGBUILD Fetching
+
+**Files to Modify:**
+- `src/sources/pkgbuild.rs` - Update AUR PKGBUILD fetching
+- `src/logic/files/pkgbuild_fetch.rs` - Update `fetch_pkgbuild_fast()` for AUR packages
+
+**Current Implementation:**
+- Manual rate limiting with mutex
+- Direct curl calls
+- Local cache checking (yay/paru)
+
+**New Implementation:**
+- Use `client.aur().pkgbuild(&name).await` for AUR packages
+- Keep local cache checking (Pacsea-specific feature)
+- Keep GitLab fetching for official packages (arch-toolkit doesn't handle this)
+
+**Considerations:**
+- arch-toolkit only handles AUR PKGBUILDs
+- Official package PKGBUILD fetching must remain in Pacsea
+- Local cache checking (yay/paru) should remain
+- Can remove manual rate limiting for AUR packages
+
+**Estimated Effort:** 2-3 hours
+
+### Phase 5: Optional - Enable Caching
+
+**Files to Modify:**
+- `src/app/runtime/` - Configure `ArchClient` with caching
+- `src/sources/mod.rs` - Cache configuration
+
+**Tasks:**
+1. Configure `CacheConfig` with appropriate TTLs
+2. Enable memory cache (fast, no persistence)
+3. Optionally enable disk cache (persists across restarts)
+4. Test cache behavior
+
+**Considerations:**
+- Caching may change behavior (stale data)
+- Need to decide on cache TTLs per operation
+- Disk cache requires `cache-disk` feature flag
+
+**Estimated Effort:** 1-2 hours
+
+### Phase 6: Cleanup and Testing
+
+**Tasks:**
+1. Remove unused code:
+ - Old AUR search implementation
+ - Old AUR comments HTML parsing
+ - Manual rate limiting code (for AUR operations)
+ - Unused dependencies (`scraper`? - check if still needed)
+2. Update tests:
+ - Mock `ArchClient` for unit tests
+ - Update integration tests
+ - Verify backward compatibility
+3. Run full test suite:
+ - `cargo fmt --all`
+ - `cargo clippy --all-targets --all-features -- -D warnings`
+ - `cargo check`
+ - `cargo test -- --test-threads=1`
+
+**Estimated Effort:** 3-4 hours
+
+## Detailed Implementation Notes
+
+### Type Compatibility
+
+**AUR Search:**
+- `arch_toolkit::AurPackage` → `pacsea::state::PackageItem`
+ - `name: String` → `name: String` ✓
+ - `version: String` → `version: String` ✓
+ - `description: String` → `description: String` ✓
+  - `popularity: Option<f64>` → `popularity: Option<f64>` ✓
+  - `out_of_date: Option<i64>` → `out_of_date: Option<i64>` ✓ (AUR RPC `OutOfDate` is a Unix timestamp — confirm exact integer type)
+  - `orphaned: bool` → `orphaned: bool` ✓ (arch-toolkit already has this!)
+  - `maintainer: Option<String>` → can derive `orphaned` from this if needed
+ - `source: Source::Aur` (need to set explicitly)
+
+**AUR Comments:**
+- ✅ **Types are IDENTICAL!** `arch_toolkit::AurComment` matches `pacsea::state::types::AurComment` exactly:
+  - `id: Option<u64>` ✓
+  - `author: String` ✓
+  - `date: String` ✓
+  - `date_timestamp: Option<i64>` ✓
+  - `date_url: Option<String>` ✓
+ - `content: String` ✓
+ - `pinned: bool` ✓
+- **No conversion needed!** Can use arch-toolkit's type directly.
+
+**PKGBUILD:**
+- Simple: `String` → `String` ✓
+
+### Error Handling
+
+**Current Pattern:**
+```rust
+pub async fn fetch_all_with_errors(query: String) -> (Vec<PackageItem>, Vec<String>)
+```
+
+**arch-toolkit Pattern:**
+```rust
+pub async fn search(&self, query: &str) -> Result<Vec<AurPackage>>
+```
+
+**Options:**
+1. Keep current pattern (convert errors to strings)
+2. Migrate to `Result` pattern (breaking change)
+3. Hybrid: Use `Result` internally, convert at boundary
+
+**Recommendation:** Option 1 for backward compatibility, consider Option 2 in future refactor.
+
+### Rate Limiting
+
+**Current:**
+- Manual mutex-based rate limiting
+- Different intervals for different operations (200ms, 500ms, 5s)
+
+**arch-toolkit:**
+- Automatic rate limiting (200ms minimum for archlinux.org)
+- Exponential backoff on failures
+- Configurable retry policies
+
+**Impact:**
+- Can remove manual rate limiting code
+- May need to adjust retry policies if current behavior differs
+
+### Caching
+
+**Current:**
+- No network-level caching
+- Only local file system caching (yay/paru caches)
+
+**arch-toolkit:**
+- Memory cache (in-process)
+- Disk cache (persistent, requires `cache-disk` feature)
+- Configurable TTLs per operation
+
+**Recommendation:**
+- Start without caching (Phase 1-4)
+- Enable caching in Phase 5 if desired
+- Keep local cache checking (yay/paru) as fallback
+
+## Dependencies Impact
+
+### New Dependencies
+- `arch-toolkit = "0.1.0"` (add)
+
+### Potentially Removable Dependencies
+- `scraper = "0.25.0"` - Check if still needed after comments migration
+ - May still be needed for other HTML parsing (news, advisories?)
+
+### No Change
+- `reqwest` - Still needed for other operations
+- `serde_json` - Still needed for other JSON parsing
+
+## Testing Strategy
+
+### Unit Tests
+1. Mock `ArchClient` for isolated testing
+2. Test type conversions (`AurPackage` → `PackageItem`)
+3. Test error handling conversions
+
+### Integration Tests
+1. Test AUR search with real API (with `--ignored` flag)
+2. Test AUR comments with real API
+3. Test PKGBUILD fetching
+4. Verify rate limiting behavior
+5. Test caching (if enabled)
+
+### Regression Tests
+1. Verify search results match previous implementation
+2. Verify comments display correctly
+3. Verify PKGBUILD fetching works for both AUR and official packages
+4. Test error cases (network failures, invalid packages)
+
+## Risk Assessment
+
+### Low Risk
+- AUR search replacement (straightforward API mapping)
+- PKGBUILD fetching (simple string return)
+
+### Medium Risk
+- AUR comments (complex HTML parsing, need to verify output compatibility)
+- Error handling changes (may affect error messages)
+
+### High Risk
+- None identified
+
+## Rollback Plan
+
+If issues arise:
+1. Keep old implementations in separate modules
+2. Use feature flag to switch between old/new
+3. Or revert commits if needed
+
+## Success Criteria
+
+1. ✅ All existing functionality works as before
+2. ✅ Code reduction (fewer lines, less complexity)
+3. ✅ Improved error handling
+4. ✅ Better rate limiting (automatic)
+5. ✅ All tests pass
+6. ✅ No performance regression
+7. ✅ Clippy and fmt pass
+
+## Timeline Estimate
+
+- **Phase 1:** 1-2 hours
+- **Phase 2:** 2-3 hours
+- **Phase 3:** 3-4 hours
+- **Phase 4:** 2-3 hours
+- **Phase 5:** 1-2 hours (optional)
+- **Phase 6:** 3-4 hours
+
+**Total:** 12-18 hours (1.5-2.5 days)
+
+## Next Steps
+
+1. ✅ Review this plan
+2. ✅ Check `AurComment` type compatibility - **CONFIRMED: Types are identical!**
+3. Start with Phase 1 (add dependency)
+4. Proceed phase by phase with testing after each phase
+5. Document any deviations from plan
+
+## Notes
+
+- arch-toolkit only handles AUR operations, not official packages
+- Local cache checking (yay/paru) should remain as it's Pacsea-specific
+- Official package PKGBUILD fetching must remain in Pacsea
+- Consider enabling caching after initial migration is stable
+- May want to contribute improvements back to arch-toolkit if needed
+
diff --git a/dev/IMPROVEMENTS/NEWS_DATA_FETCHING_OPTIMIZATIONS.md b/dev/IMPROVEMENTS/NEWS_DATA_FETCHING_OPTIMIZATIONS.md
new file mode 100644
index 000000000..15e5b978e
--- /dev/null
+++ b/dev/IMPROVEMENTS/NEWS_DATA_FETCHING_OPTIMIZATIONS.md
@@ -0,0 +1,302 @@
+# News Data Fetching Optimizations
+
+> **Overview**: This document lists all measures implemented to **reduce data fetching** for news management. These optimizations help **minimize network usage**, **improve performance**, and **reduce server load**.
+
+---
+
+## Table of Contents
+
+1. [Overview](#overview)
+2. [Implemented Optimizations](#implemented-optimizations)
+ - [Smart Caching](#smart-caching)
+ - [Network Efficiency](#network-efficiency)
+ - [Request Control](#request-control)
+ - [Error Handling](#error-handling)
+ - [Data Filtering](#data-filtering)
+3. [Future Suggestions](#future-suggestions)
+4. [Summary](#summary)
+
+---
+
+## Overview
+
+### Key Benefits
+
+| Benefit | Description |
+|---------|-------------|
+| **Reduced Network Usage** | By caching data and using conditional requests, the app downloads much less data |
+| **Improved Speed** | Cached data loads instantly, and smart request timing prevents delays |
+| **Increased Reliability** | Circuit breakers and error handling ensure the app works even when servers have issues |
+| **Server-Friendly** | Rate limiting and request serialization prevent overwhelming servers |
+
+### Optimization Categories
+
+| Category | Features | Status |
+|----------|----------|--------|
+| **Caching** | • Memory Cache<br>• Disk Cache<br>• Multi-Layer System | ✅ Implemented |
+| **Network Efficiency** | • Conditional Requests<br>• Connection Reuse<br>• Browser Compatibility | ✅ Implemented |
+| **Request Control** | • Rate Limiting<br>• Smart Timing<br>• Retry Logic | ✅ Implemented |
+| **Error Handling** | • Circuit Breaker<br>• Graceful Degradation<br>• Timeout Management | ✅ Implemented |
+| **Data Filtering** | • Early Filtering<br>• Request Deduplication<br>• Smart Fetching | ✅ Implemented |
+| **Future Improvements** | • Incremental Updates<br>• HTTP Compression<br>• Batch Requests | 🔄 Suggested |
+
+---
+
+## Implemented Optimizations
+
+### Smart Caching
+
+#### Multi-Layer Cache System
+
+| Cache Type | Duration | Use Case |
+|------------|----------|----------|
+| **Fast Memory Cache** | **15 minutes** | Same session viewing |
+| **Persistent Disk Cache** | **14 days** (configurable) | After app restart |
+| **Separate Caches** | Per source | News feeds, articles, updates, comments |
+
+#### Cache Benefits
+
+- ✅ **No Repeated Downloads**: Once fetched, data is **reused from cache** instead of downloading again
+- ✅ **Works Offline**: Cached data can be shown even when the network is unavailable
+- ✅ **Faster Loading**: Cached data loads **instantly** without waiting for network requests
+
+---
+
+### Network Efficiency
+
+#### Conditional Requests
+
+| Feature | How It Works | Benefit |
+|---------|-------------|---------|
+| **ETag Support** | Checks if content changed via ETag headers | Server responds with "not modified" if unchanged, **saving bandwidth** |
+| **Last-Modified** | Uses modification dates | **Avoids downloading unchanged content** |
+| **304 Not Modified** | Server confirms no changes | Uses **cached version instead of downloading** |
+
+#### Connection Reuse
+
+- **Connection Pooling**: **Reuses existing network connections** instead of creating new ones for each request
+- **Reduced Overhead**: Minimizes connection setup time and resource usage
+
+#### Browser Compatibility
+
+- **Browser-Like Headers**: Uses headers similar to web browsers to work better with server protection systems
+- **Proper User-Agent**: Identifies the app properly to servers
+
+---
+
+### Request Control
+
+#### Rate Limiting
+
+| Setting | Value | Purpose |
+|--------|-------|---------|
+| **General Requests** | **500ms** minimum delay | Prevents rapid-fire requests |
+| **archlinux.org** | **2 seconds** minimum delay | Respects server limits |
+| **Progressive Delays** | Up to **60 seconds** | Auto-adjusts when server indicates overload |
+| **Request Serialization** | **1 at a time** | Prevents overwhelming archlinux.org |
+
+#### Smart Timing
+
+- **Random Jitter**: Adds small random delays (**0-500ms**) to prevent multiple clients from requesting at the exact same time
+- **Staggered Startup**: Delays initial requests when the app starts to **spread out load** across different users
+
+#### Retry Logic
+
+| Retry Strategy | Details |
+|---------------|---------|
+| **Exponential Backoff** | **2s → 4s → 8s → 16s**, up to **60s** |
+| **Limited Retries** | Only **2 retries** (**3 total attempts**) |
+| **Server Instructions** | Honors **"Retry-After"** headers |
+
+---
+
+### Error Handling
+
+#### Circuit Breaker Pattern
+
+| State | Trigger | Action |
+|-------|--------|--------|
+| **Failure Detection** | **50% of recent requests fail** | **Stops making new requests** temporarily |
+| **Automatic Recovery** | After **60 seconds** | Tries one test request, resumes if successful |
+| **Graceful Degradation** | When blocked | Shows **cached content if available** instead of errors |
+
+#### Network Error Handling
+
+- **HTTP 429 Handling**: Properly handles **"too many requests"** errors with appropriate delays
+- **Timeout Management**: Sets reasonable timeouts (**15s connect, 30s total**) to avoid hanging requests
+- **Error Recovery**: **Falls back to cached content** when network requests fail
+
+---
+
+### Data Filtering
+
+#### Filtering Strategies
+
+| Strategy | Description | Benefit |
+|----------|-------------|---------|
+| **Date-Based Filtering** | Stops fetching when items exceed max age | **Avoids unnecessary data download** |
+| **Installed Packages Only** | Skips uninstalled packages when filtered | **Skips fetching data** for irrelevant packages |
+| **Time-Based Skipping** | Skips re-fetch if fetched within **5 minutes** | Prevents redundant requests |
+| **Selective Fetching** | **Only fetches what's needed** | Based on current filters and settings |
+
+#### Request Optimization
+
+- **Smart Parallelization**: Fetches different data sources **in parallel** when possible, but **serializes requests** to the same server
+- **Stale Request Draining**: When users scroll quickly, **cancels older pending requests** and only processes the most recent one
+- **Debounced Fetching**: Waits **0.5 seconds** after selecting a news item before fetching content
+
+---
+
+## Future Suggestions
+
+> **Priority Order**: Optimizations are prioritized by their impact on **reducing server data fetching**, with **user experience improvements** as a secondary consideration.
+
+### Priority Overview
+
+| Priority | Focus | Count |
+|----------|-------|-------|
+| **Highest** | Data Fetching | 1 |
+| **High** | Data Fetching | 2 |
+| **Medium-High** | User Usability + Data Fetching | 1 |
+| **Medium** | User Usability + Data Fetching | 2 |
+| **Lower** | User Usability | 1 |
+| **Lowest** | Disk Usage | 1 |
+
+---
+
+### 1. Incremental Feed Updates ⭐ Highest Priority
+
+**Improves**: **Data Fetching**
+
+#### Description
+Track which news items have already been fetched. On refresh, **only fetch new items** since the last check instead of re-fetching the entire feed. This is partially implemented but could be extended.
+
+#### Impact
+- **Directly reduces the number of server requests** by avoiding re-fetching unchanged content
+- Can reduce request size by **80-95%** on subsequent refreshes
+
+---
+
+### 2. HTTP Compression ⭐ High Priority
+
+**Improves**: **Data Fetching**
+
+#### Description
+Add `Accept-Encoding: gzip, deflate` header to requests. Servers can compress responses, reducing bandwidth by **60-80%** for text content. The HTTP client would automatically decompress responses.
+
+#### Impact
+- **Significantly reduces bandwidth per request** without changing request frequency
+- **Easy to implement** with minimal code changes
+
+---
+
+### 3. Batch Request Optimization ⭐ High Priority
+
+**Improves**: **Data Fetching**
+
+#### Description
+When multiple items need content fetching, batch them intelligently. Wait a short period (**100-200ms**) to collect multiple requests, then fetch them together if they're from the same server.
+
+#### Impact
+- **Reduces the number of separate HTTP requests** by combining multiple fetches into fewer requests
+- Reduces server load and connection overhead
+
+---
+
+### 4. Smart Cache Warming ⭐ Medium-High Priority
+
+**Improves**: **User Usability**, **Data Fetching**
+
+#### Description
+On app startup, if cache is old but still valid, **show cached content immediately** while refreshing in the background. Users see content **instantly** while fresh data loads silently.
+
+#### Impact
+- **Improves perceived performance significantly**
+- Reduces user-initiated refresh requests since content is already fresh when they need it
+
+---
+
+### 5. Network-Aware Fetching ⭐ Medium Priority
+
+**Improves**: **Data Fetching**, **User Usability**
+
+#### Description
+- **Connection Quality Detection**: Detect slow or unreliable connections and adjust behavior (longer timeouts, more aggressive caching, less prefetching)
+- **WiFi vs Mobile Detection**: **Reduce prefetching and background updates** when on mobile data to save user's data plan
+
+#### Impact
+- **Reduces unnecessary requests** on poor connections and respects user's data plan limits
+- **Prevents wasted bandwidth** on failed requests
+
+---
+
+### 6. Background Refresh ⭐ Medium Priority
+
+**Improves**: **User Usability**
+
+#### Description
+- **Idle-Time Updates**: When the app is idle (no user interaction for **30+ seconds**), refresh cached data in the background
+- **Low-Priority Refresh**: Mark background refreshes as low priority to avoid interfering with user-initiated requests
+
+#### Impact
+- **Improves user experience** by keeping data fresh without user action
+- Better timing reduces perceived wait times
+
+---
+
+### 7. Predictive Prefetching ⭐ Lower Priority
+
+**Improves**: **User Usability**
+
+#### Description
+- **Adjacent Item Prefetching**: When a user is viewing a news item, prefetch content for the **next 1-2 items** in the list
+- **Scroll Direction Awareness**: Prefetch items in the direction the user is scrolling (up or down)
+
+#### Impact
+- **Improves user experience significantly**, but may increase total requests if users don't view prefetched items
+- Should be implemented carefully with limits
+
+---
+
+### 8. Cache Compression ⭐ Lowest Priority
+
+**Improves**: **Disk Usage**
+
+#### Description
+Compress cached data before saving to disk (using gzip). Reduces disk space usage by **60-80%** and speeds up disk I/O for large cache files.
+
+#### Impact
+- Only affects local disk usage, not server requests
+- Useful for users with limited disk space but doesn't reduce data fetching
+
+---
+
+## Summary
+
+### Combined Impact
+
+These measures work together to create a comprehensive optimization strategy:
+
+```
+┌──────────────────────────────────────────────────────────────┐
+│ Optimization Results │
+├──────────────────────────────────────────────────────────────┤
+│ ✅ Reduced Network Usage │ ✅ Improved Speed │
+│ ✅ Increased Reliability │ ✅ Server-Friendly │
+└──────────────────────────────────────────────────────────────┘
+```
+
+### Key Achievements
+
+- **Reduce Network Usage**: By caching data and using conditional requests, the app **downloads much less data**
+- **Improve Speed**: Cached data loads **instantly**, and smart request timing prevents delays
+- **Increase Reliability**: Circuit breakers and error handling ensure the app works even when servers have issues
+- **Respect Server Limits**: Rate limiting and request serialization **prevent overwhelming servers**
+
+### Final Result
+
+The result is a news system that is **fast**, **efficient**, and **respectful** of both network resources and server capacity.
+
+---
+
+*Last Updated: Document reflects current implementation status and future improvement suggestions*
diff --git a/dev/IMPROVEMENTS/PACKAGE_MANAGEMENT_OPTIMIZATIONS.md b/dev/IMPROVEMENTS/PACKAGE_MANAGEMENT_OPTIMIZATIONS.md
new file mode 100644
index 000000000..ccf8ae727
--- /dev/null
+++ b/dev/IMPROVEMENTS/PACKAGE_MANAGEMENT_OPTIMIZATIONS.md
@@ -0,0 +1,297 @@
+# Package Management Data Fetching Optimizations
+
+> **Overview**: This document lists all measures implemented to **reduce data fetching** for package management. These optimizations help **minimize system calls**, **improve performance**, and **reduce database queries**.
+
+---
+
+## Table of Contents
+
+1. [Overview](#overview)
+2. [Implemented Optimizations](#implemented-optimizations)
+ - [Smart Caching](#smart-caching)
+ - [Database Query Optimization](#database-query-optimization)
+ - [Batch Operations](#batch-operations)
+ - [Offline-First Strategy](#offline-first-strategy)
+ - [Rate Limiting](#rate-limiting)
+3. [Future Suggestions](#future-suggestions)
+4. [Summary](#summary)
+
+---
+
+## Overview
+
+### Key Benefits
+
+| Benefit | Description |
+|---------|-------------|
+| **Reduced System Calls** | By caching data and batching queries, the app makes far fewer pacman/database calls |
+| **Improved Speed** | Cached data loads instantly, and batch operations process multiple packages at once |
+| **Increased Reliability** | Offline-first approach and graceful degradation ensure the app works even when databases are unavailable |
+| **System-Friendly** | Rate limiting and smart querying prevent overwhelming the package database |
+
+### Optimization Categories
+
+| Category | Features | Status |
+|----------|----------|--------|
+| **Caching** | • Dependency Cache<br>• File Cache<br>• Sandbox Cache<br>• PKGBUILD Cache<br>• Official Index Cache | ✅ Implemented |
+| **Database Optimization** | • HashSet Lookups<br>• Signature-Based Validation<br>• O(1) Name Lookups | ✅ Implemented |
+| **Batch Operations** | • Batch pacman Queries<br>• Parallel Processing<br>• Chunked Requests | ✅ Implemented |
+| **Offline-First** | • yay/paru Cache<br>• Disk Persistence<br>• Partial Cache Matching | ✅ Implemented |
+| **Rate Limiting** | • PKGBUILD Fetching<br>• Minimum Intervals | ✅ Implemented |
+| **Future Improvements** | • Incremental Index Updates<br>• Query Result Caching<br>• Smart Prefetching | 🔄 Suggested |
+
+---
+
+## Implemented Optimizations
+
+### Smart Caching
+
+#### Multi-Layer Cache System
+
+| Cache Type | Purpose | Validation Method |
+|------------|---------|-------------------|
+| **Dependency Cache** | Stores resolved dependency graphs | Signature-based (package list) |
+| **File Cache** | Stores file change metadata | Signature-based with partial matching |
+| **Sandbox Cache** | Stores sandbox analysis data | Signature-based with intersection matching |
+| **PKGBUILD Cache** | Stores parsed PKGBUILD data | LRU cache (200 entries) with signature hash |
+| **Official Index Cache** | Stores official package database | Disk persistence with name-to-index mapping |
+
+#### Cache Benefits
+
+- ✅ **No Repeated Queries**: Once resolved, data is **reused from cache** instead of querying again
+- ✅ **Works Offline**: Cached data can be shown even when databases are unavailable
+- ✅ **Faster Loading**: Cached data loads **instantly** without waiting for system calls
+- ✅ **Partial Matching**: File and sandbox caches support partial matching when packages are added/removed
+
+#### Signature-Based Validation
+
+- **Order-Agnostic Signatures**: Package lists are sorted alphabetically to create signatures that ignore ordering
+- **Exact Matching**: Caches validate signatures before use, ensuring data matches the current package list
+- **Partial Matching**: Some caches support loading entries for packages that exist in both cache and current list
+
+---
+
+### Database Query Optimization
+
+#### Efficient Data Structures
+
+| Structure | Purpose | Benefit |
+|-----------|---------|---------|
+| **HashSet for Installed** | O(1) membership tests | **Instant lookup** of installed packages |
+| **HashSet for Explicit** | O(1) explicit package checks | **Fast filtering** of explicitly installed packages |
+| **HashMap Name-to-Index** | O(1) package lookups | **Direct access** to official packages by name |
+| **LRU Cache for PKGBUILD** | Bounded in-memory cache | **Fast parsing** of recently viewed PKGBUILDs |
+
+#### Query Optimization
+
+- **Single Database Load**: Official index is loaded once and kept in memory
+- **Lazy Loading**: Index loads from disk only when memory cache is empty
+- **Index Rebuilding**: Name-to-index mapping is rebuilt after deserialization for fast lookups
+
+---
+
+### Batch Operations
+
+#### Batch pacman Queries
+
+| Operation | Batch Size | Benefit |
+|-----------|------------|---------|
+| **Package Info (-Si)** | **100 packages** | Reduces pacman calls by **99%** for large lists |
+| **Installed Versions (-Q)** | **50 packages** | Combines multiple queries into single command |
+| **Installed Sizes (-Qi)** | **50 packages** | Batches size queries to reduce overhead |
+| **Dependency Info (-Si)** | **50 packages** | Fetches dependencies for multiple packages at once |
+| **Remote File Lists** | All official packages | Single batch query for all file lists |
+
+#### Parallel Processing
+
+- **Background Enrichment**: Package descriptions and metadata are enriched in background tasks
+- **Chunked Processing**: Large batches are split into chunks to avoid command-line length limits
+- **Fallback Strategy**: If batch query fails, falls back to individual queries gracefully
+
+---
+
+### Offline-First Strategy
+
+#### PKGBUILD Caching
+
+| Source | Priority | Description |
+|--------|----------|-------------|
+| **yay/paru Cache** | **First** | Checks local AUR helper cache before network |
+| **Disk Cache** | **Second** | Uses persisted PKGBUILD cache if available |
+| **Network Fetch** | **Last** | Only fetches from network if cache misses |
+
+#### Cache Persistence
+
+- **Disk Storage**: All caches persist to disk as JSON files
+- **Automatic Loading**: Caches are loaded automatically on app startup
+- **Signature Validation**: Caches are validated against current package lists before use
+
+---
+
+### Rate Limiting
+
+#### PKGBUILD Fetching
+
+| Setting | Value | Purpose |
+|---------|-------|---------|
+| **Minimum Interval** | **500ms** | Prevents rapid-fire PKGBUILD requests |
+| **Rate Limiter** | Per-request tracking | Ensures minimum delay between network fetches |
+
+#### Smart Timing
+
+- **Request Tracking**: Last request time is tracked to enforce minimum delays
+- **Automatic Delays**: Waits automatically if requests are too frequent
+
+---
+
+## Future Suggestions
+
+> **Priority Order**: Optimizations are prioritized by their impact on **reducing system calls and database queries**, with **user experience improvements** as a secondary consideration.
+
+### Priority Overview
+
+| Priority | Focus | Count |
+|----------|-------|-------|
+| **Highest** | Database Queries | 1 |
+| **High** | Query Optimization | 2 |
+| **Medium-High** | User Usability + Performance | 1 |
+| **Medium** | Performance | 2 |
+| **Lower** | User Usability | 1 |
+| **Lowest** | Disk Usage | 1 |
+
+---
+
+### 1. Incremental Index Updates ⭐ Highest Priority
+
+**Improves**: **Database Queries**
+
+#### Description
+Track which packages have been added/updated since last index refresh. On update, **only fetch changed packages** instead of re-fetching the entire index. This is partially implemented but could be extended.
+
+#### Impact
+- **Directly reduces the number of database queries** by avoiding re-fetching unchanged packages
+- Can reduce query size by **80-95%** on subsequent refreshes
+
+---
+
+### 2. Query Result Caching ⭐ High Priority
+
+**Improves**: **Database Queries**
+
+#### Description
+Cache results of common pacman queries (e.g., `-Q`, `-Si`, `-Qi`) with short TTLs (5-15 minutes). Reduces redundant queries when the same information is requested multiple times.
+
+#### Impact
+- **Significantly reduces redundant database queries** for frequently accessed package information
+- **Easy to implement** with minimal code changes
+
+---
+
+### 3. Smart Query Deduplication ⭐ High Priority
+
+**Improves**: **Database Queries**
+
+#### Description
+Track pending queries and deduplicate identical requests. If the same query is requested multiple times before completion, combine them into a single query.
+
+#### Impact
+- **Reduces duplicate queries** when multiple parts of the app request the same data simultaneously
+- Prevents wasted system resources on redundant operations
+
+---
+
+### 4. Predictive Cache Warming ⭐ Medium-High Priority
+
+**Improves**: **User Usability**, **Performance**
+
+#### Description
+On app startup, pre-warm caches for commonly accessed packages or packages in the install list. Users see data instantly while background resolution completes.
+
+#### Impact
+- **Improves perceived performance significantly**
+- Reduces user-initiated queries since data is already available when needed
+
+---
+
+### 5. Parallel Cache Resolution ⭐ Medium Priority
+
+**Improves**: **Performance**
+
+#### Description
+Resolve multiple cache types (dependencies, files, sandbox) in parallel when possible. Use background workers to process different cache types simultaneously.
+
+#### Impact
+- **Reduces total resolution time** by processing multiple cache types concurrently
+- Better resource utilization on multi-core systems
+
+---
+
+### 6. Smart Index Enrichment ⭐ Medium Priority
+
+**Improves**: **Performance**
+
+#### Description
+Enrich package index metadata (descriptions, versions) on-demand rather than all at once. Only fetch metadata for packages that are actually viewed or searched.
+
+#### Impact
+- **Reduces initial load time** by deferring non-critical metadata fetching
+- Better resource usage by only fetching what's needed
+
+---
+
+### 7. Query Result Streaming ⭐ Lower Priority
+
+**Improves**: **User Usability**
+
+#### Description
+Stream query results incrementally as they become available, rather than waiting for all results. Users see partial results immediately while remaining data loads.
+
+#### Impact
+- **Improves user experience** by showing results as they arrive
+- Reduces perceived wait times for large queries
+
+---
+
+### 8. Cache Compression ⭐ Lowest Priority
+
+**Improves**: **Disk Usage**
+
+#### Description
+Compress cached data before saving to disk (using gzip). Reduces disk space usage by **60-80%** and speeds up disk I/O for large cache files.
+
+#### Impact
+- Only affects local disk usage, not system queries
+- Useful for users with limited disk space but doesn't reduce data fetching
+
+---
+
+## Summary
+
+### Combined Impact
+
+These measures work together to create a comprehensive optimization strategy:
+
+```
+┌──────────────────────────────────────────────────────────────┐
+│ Optimization Results │
+├──────────────────────────────────────────────────────────────┤
+│ ✅ Reduced System Calls │ ✅ Improved Speed │
+│ ✅ Increased Reliability │ ✅ System-Friendly │
+└──────────────────────────────────────────────────────────────┘
+```
+
+### Key Achievements
+
+- **Reduce System Calls**: By caching data and batching queries, the app **makes far fewer pacman/database calls**
+- **Improve Speed**: Cached data loads **instantly**, and batch operations process multiple packages at once
+- **Increase Reliability**: Offline-first approach and graceful degradation ensure the app works even when databases are unavailable
+- **Respect System Limits**: Rate limiting and smart querying **prevent overwhelming the package database**
+
+### Final Result
+
+The result is a package management system that is **fast**, **efficient**, and **respectful** of both system resources and database capacity.
+
+---
+
+*Last Updated: Document reflects current implementation status and future improvement suggestions*
+
diff --git a/dev/IMPROVEMENTS/news_feed_implementation.md b/dev/IMPROVEMENTS/news_feed_implementation.md
new file mode 100644
index 000000000..9893b8d3c
--- /dev/null
+++ b/dev/IMPROVEMENTS/news_feed_implementation.md
@@ -0,0 +1,69 @@
+# News Feed Implementation Suggestions
+
+## Existing Coverage Snapshot
+- Arch news RSS already fetched (`src/sources/news.rs`) and surfaced via CLI/startup modal.
+- AUR RPC v5 search/info endpoints already used for package search/details (`src/sources/search.rs`, `src/sources/details.rs`).
+- Official repo JSON search API used for indexing/search (`src/index/mirrors.rs`).
+- No ingestion yet for security advisories, package-update RSS feeds, AUR bulk metadata archives, or pacman db tarballs.
+
+## Priority Focus
+- Package news for installed packages only (AUR + official), including recent AUR user comments in the feed view.
+- Security news with user control over scope: toggle between installed-only and all packages.
+
+## Source Coverage and Plan
+| Source | Status | What it is / Uses | Suggested implementation notes |
+| --- | --- | --- | --- |
+| AUR Package Info (RPC v5) | Implemented (details) | AUR’s JSON info endpoint for specific packages; returns version, maintainer, out-of-date flag, popularity, etc. Use for precise “this package updated” detection. | Reuse existing client; build feed items for installed AUR packages when version differs from cached snapshot; include maintainer/orphan/out-of-date markers. |
+| AUR Package Search (RPC v5) | Implemented (search) | AUR’s search endpoint returning lightweight results (name/desc/version/popularity). Good for discovery and link-outs. | No extra ingestion needed; surface as “related/discovery” links inside feed cards when helpful. |
+| AUR Metadata Bulk (.json.gz) | Not implemented | Full compressed dump of all AUR metadata. Enables fast snapshot/diff across the entire AUR without many RPC calls. | Periodic downloader with etag/if-modified-since to cache dir; parse incrementally; filter to installed names first for feed; allow “all packages” diff on demand. |
+| Official Repo Packages (JSON API) | Implemented (index build) | Arch official packages JSON API used today for index. Reliable for name/version/desc per repo/arch. | Tap existing fetches to emit feed items when versions change; persist last-seen map; default to installed set, allow “all” toggle. |
+| Official Repo Search (JSON) | Implemented (search) | JSON search endpoint for official repos; user-driven lookup, not a feed source. | Keep for interactive search only; no feed ingestion needed. |
+| Security Advisories (security.archlinux.org JSON) | Not implemented | Structured advisory feed with IDs, severity, affected packages, fixed versions. | Poll endpoint; generate feed with severity badges; add scope filter (installed vs all); maintain per-advisory read/unread. |
+| Arch News (RSS) | Implemented | Official Arch news posts (manual interventions, announcements). Good for human-readable notices. | Store GUID/pubDate to dedupe; retry/backoff; fast-fail offline. |
+| Package Updates (RSS per repo/arch) | Not implemented | RSS streams listing recent package version updates per repository/architecture. Human-friendly “recent changes” view. | Optional subscription; normalize to package/version tuples; dedupe with official index; default to installed set only. |
+| Pacman Database (db.tar.gz) | Not implemented | Compressed sync dbs (`$repo.db.tar.gz`) containing package metadata/versions; usable offline. | Opt-in download; parse `desc` files to detect version bumps and metadata when APIs fail; limit to installed names to save work. |
+| Arch Packaging Issues (GitLab) | Not implemented | Open issues across the Arch packaging group (pkg-level breakage reports). | Poll GitLab API for open issues under `archlinux/packaging/packages`; filter titles/labels/body against installed package names; cache ETag/Last-Modified; emit feed items with severity/label badges; default to installed set with optional “all issues” toggle. |
+| AUR Comments (HTML/JSON) | Implemented (existing comment fetch) | Latest user comments on AUR packages; useful signal for build breaks or fixes. | Reuse `fetch_aur_comments`; show recent comments for installed AUR packages; track last-seen comment ID to avoid noise. |
+| Arch BBS (bbs.archlinux.org feeds) | Not implemented | Forum threads (Announcements, Pacman/Upgrade Issues, Security, AUR). Atom/RSS via `extern.php?action=feed&type=atom|rss&fid=`; HTML fallback. | Optional and user-configurable; per-forum enable/disable; rate-limit; cache ETag/Last-Modified; parse titles/links/dates; default off to avoid noise. |
+| Full repo snapshots (official/AUR) | Not implemented | Large mirror snapshots of official or AUR repos (packages and/or full metadata). | Opt-in only; for offline/air-gapped or reproducibility; not needed for routine feeds. Track size/bandwidth warnings. |
+
+## Proposed Architecture
+- Create `sources::feeds` with per-source fetchers returning `Vec<FeedItem>`; shared `FeedItemKind` enum (`News`, `Advisory`, `Update`, `AURChange`, etc.).
+- Central scheduler: periodic async tasks with jitter; backoff on failure; honor global `--dry-run` (log planned fetches only).
+- Persistence: store last-seen identifiers per source (e.g., advisory ID, RSS GUID/link, pkg+version) under `~/.config/pacsea/cache/news/`.
+- Caching and diffing: compare newly fetched items against last-seen snapshot to generate incremental feed entries; keep small ring buffer to bound disk use.
+- Graceful degradation: if `curl/reqwest` or network unavailable, surface actionable error in UI and continue with other sources.
+- UI: single feed view with filters (source, severity, unread); actions to mark read, open link, copy URL; keyboard-first shortcuts aligned with existing patterns.
+- Testing: add unit tests per fetcher using recorded fixtures; integration test that aggregates mixed sources and enforces dedupe and ordering.
+
+## What the missing sources provide
+- Package Updates (RSS per repo/arch): RSS feeds published per repository/architecture that list recent package version updates; each item usually contains package name, new version, link to package page, and publish date. Not required for update checks (we already have API/index paths), but useful for a human-readable “recent repo changes” lane and for cross-checking unexpected version bumps.
+- Pacman Database (db.tar.gz): Compressed pacman sync database files (`$repo.db.tar.gz`) containing package metadata and versions; parsing them locally allows offline detection of version changes and metadata when APIs are unreachable. Typical size: a few MB for `core`, low tens of MB for `extra`/`multilib`; total usually under ~40–60 MB per arch. Opt-in download. Uses: offline verification, reproducing historical state, secondary diff source when JSON APIs are down, and deep metadata parsing (depends/optdepends/licensing) without invoking pacman on the live system.
+- AUR Metadata Bulk (.json.gz): Periodic full AUR metadata dumps in compressed JSON; processing them yields a snapshot of all AUR packages (name, version, metadata) enabling fast diffing to detect updates without many RPC calls. Typical size: tens of MB compressed (varies with AUR churn); opt-in download. Uses: fast installed-only diffs, bulk analytics (orphaned/out-of-date stats), and reduced network chatter compared to many RPC calls. From a user perspective: quicker “what changed in my AUR packages” checks with fewer waits, less API throttling, and an optional full snapshot for deeper exploration when desired.
+- Arch Packaging Issues (GitLab): Open issues in the Arch packaging group (e.g., build failures, missing deps, bad metadata). Poll the GitLab API with ETag/Last-Modified, filter by installed package names in title/labels/description, and emit feed entries with label/severity badges plus links back to GitLab. Useful for early warning of breakage affecting installed packages; default scope is installed set, with opt-in “all issues” view.
+- Full repo snapshots (official/AUR): Large downloads (hundreds of MB+ depending on mirror scope) that mirror package files and/or full metadata. Opt-in only; suited for offline/air-gapped environments, reproducibility, or accelerating local diff pipelines. Not necessary for normal feed consumption.
+- Arch BBS feeds: Atom/RSS endpoints exposed via `extern.php` for specific forum IDs (e.g., Announcements, Pacman & Package Upgrades, Security). Useful for surfacing forum alerts about breakage, manual interventions, or security discussions; make per-forum opt-in configurable with rate limits and caching.
+
+## News Feed Priority List (stylistic match to Feature Priority doc)
+
+| Tier | Item | Target | Impact | Complexity |
+|------|------|--------|--------|------------|
+| 🔴 | Installed-package news + AUR comments | v0.7.1 | ⭐⭐⭐⭐ | Medium-High |
+| 🔴 | Security advisories (installed/all toggle) | v0.7.1 | ⭐⭐⭐⭐ | Medium |
+| 🟠 | AUR metadata bulk diff (installed-first) | v0.7.x | ⭐⭐⭐ | Medium |
+| 🟠 | Package update RSS lane (optional) | v0.7.x | ⭐⭐ | Low-Medium |
+| 🟠 | Arch packaging issues (installed-first) | v0.7.x | ⭐⭐⭐ | Medium |
+| 🟡 | Pacman db.tar.gz fallback (opt-in) | v0.7.x | ⭐⭐ | Medium |
+| 🟡 | Arch BBS feeds (per-forum opt-in) | v0.7.x | ⭐⭐ | Low-Medium |
+| 🟢 | Full repo snapshots (opt-in/offline) | v0.7.x | ⭐⭐ | High |
+
+### Tier details
+- 🔴 Installed-package news + AUR comments (v0.7.1): Aggregate version bumps for installed official/AUR packages and surface latest AUR comments with last-seen tracking; unread/read state; keyboard-first filters.
+- 🔴 Security advisories with scope toggle (v0.7.1): Fetch security.archlinux.org JSON; show severity, affected packages, fixed versions; filters for installed vs all; per-advisory read/unread; link-out to details.
+- 🟠 AUR metadata bulk diff (v0.7.x): Periodically fetch AUR .json.gz with ETag/If-Modified-Since; parse incrementally, prioritizing installed package names; optional full diff mode for all packages; emit update events while minimizing RPC calls.
+- 🟠 Package update RSS lane (v0.7.x, optional): Subscription per repo/arch; human-friendly recent changes stream; dedupe against official index; default to installed set; low runtime risk if disabled.
+- 🟠 Arch packaging issues (v0.7.x, installed-first): Poll GitLab API for open issues in `archlinux/packaging/packages`; filter by installed package names in title/labels/description; cache ETag/Last-Modified and last-seen issue IDs; emit feed cards with labels/severity and links; opt-in “all issues” mode; rate-limit to avoid API abuse.
+- 🟡 Pacman db.tar.gz fallback (v0.7.x, opt-in): Download per repo/arch on demand; parse desc for version/metadata when APIs are down; offline verification; bandwidth-aware prompts.
+- 🟡 Arch BBS feeds (v0.7.x, per-forum opt-in): Atom/RSS via `extern.php` for selected forums (Announcements, Pacman & Package Upgrades, Security). Default off; user-select forums; rate-limit and cache ETag/Last-Modified; useful for surfacing breakage/manual intervention chatter without overwhelming the feed.
+- 🟢 Full repo snapshots (v0.7.x, opt-in): Large mirror snapshots (official/AUR); for offline/air-gapped/repro builds; not needed for routine feeds; require quota warnings and manual enablement.
+
diff --git a/dev/PR/PR_feat-extended-news.md b/dev/PR/PR_feat-extended-news.md
new file mode 100644
index 000000000..714f6f75d
--- /dev/null
+++ b/dev/PR/PR_feat-extended-news.md
@@ -0,0 +1,101 @@
+## Summary
+
+**What's New:**
+- **News Mode**: Complete news feed system with Arch Linux news, security advisories, package updates, and AUR comments. Filter, sort, bookmark, and track read/unread status. Optional startup mode via `app_start_mode = news`.
+- **JSON Caching**: Cache AUR and official package JSON responses to disk for change detection and offline date fallback
+- **Change Detection**: Compare cached vs current JSON to detect package changes (version, maintainer, dependencies, etc.) and display in news content
+- **Background Retry Queue**: Failed package date fetches are queued and retried sequentially with exponential backoff (10s, 20s, 40s), up to 3 attempts per package
+- **Background Continuation**: After initial limit (50 items), continue fetching all remaining items in background and stream to UI at 1 per second
+- **Package Date Fetching**: Fetches package update dates from archlinux.org JSON endpoints with fallback to cached data, handles multiple repo/arch combinations, and distinguishes HTTP status codes (404 vs 429/5xx)
+- **Date Parsing**: Handles RFC3339 format with milliseconds, RSS dates, and normalizes to YYYY-MM-DD for consistent sorting
+- **AUR Package Allocation**: AUR packages get dedicated allocation (half of limit) to ensure representation alongside official packages
+- **Reliability**: Rate limiting, circuit breakers, and error recovery prevent IP blocking from archlinux.org (404s don't trigger circuit breaker)
+- **Performance**: Multi-layer caching (15min memory, 14 days disk) reduces network requests
+- **Code Quality**: Improved clippy allow comments, reduced function complexity, added CodeQL workflow, improved documentation with rustdoc comments and inline explanations
+- **Refactoring**: Modularized large source files into organized submodules (sources/feeds, sources/news, events/modals/tests, ui/results/title, app_state, workers)
+- **Logging**: Promoted important operational messages from DEBUG to INFO level for better visibility
+- **i18n**: Made config directory alert detection language-agnostic using path patterns instead of hardcoded strings
+
+**Bug Fixes (to existing code in main branch):**
+- Fixed updates window text alignment when package names wrap
+- Fixed options menu key bindings to match display order
+- Fixed `installed_packages.txt` export to respect `installed_packages_mode` setting
+- Fixed alert title showing "Connection issue" instead of "Configuration Directories" for config directory messages after package removal
+- Fixed Shift+Tab keybind to also work in News mode (previously only worked in Package mode)
+- Fixed overflow handling in scroll calculations with proper clamping to prevent incorrect scroll positions
+
+## Type of change
+- [x] feat (new feature)
+- [x] fix (bug fix)
+- [x] refactor (no functional change)
+- [x] perf (performance)
+- [x] test (add/update tests)
+- [x] chore (build/infra/CI)
+- [x] style (formatting, code style)
+- [x] ui (visual/interaction changes)
+
+## How to test
+
+**Basic Tests:**
+```bash
+cargo fmt --all
+cargo clippy --all-targets --all-features -- -D warnings
+cargo check
+cargo test -- --test-threads=1
+```
+
+**News Mode:**
+1. Launch Pacsea, switch to News mode (or set `app_start_mode = news`)
+2. Verify news items load (Arch news, advisories, updates, AUR comments)
+3. Test filters, sorting, read/unread tracking, and bookmarks
+4. Check loading messages appear on first launch
+5. Test Shift+Tab cycles through news sort modes (Date↓, Date↑, Title, Source+Title, Severity+Date, Unread+Date)
+6. Verify background continuation streams additional items after initial 50 (check logs for "continuation worker")
+7. Verify package update dates are correct (not showing today's date when network fails)
+8. Check news content shows JSON change descriptions for AUR and official packages
+9. Verify AUR packages appear even when official packages fill the limit
+
+**Reliability:**
+- Verify no 429 errors in logs (rate limiting working)
+- Test cached content loads faster on subsequent views
+- Verify circuit breaker activates on failures and recovers
+
+**Bug Fixes:**
+- See "Bug Fixes (to existing code in main branch)" section above
+
+## Checklist
+
+- [x] Code compiles, formats, and passes clippy
+- [x] All tests pass
+- [x] New functions have rustdoc comments
+- [x] No `unwrap()` or `expect()` in non-test code
+- [x] Changes respect `--dry-run` flag
+- [x] Code degrades gracefully if tools unavailable
+- [x] No breaking changes
+
+## Notes for reviewers
+
+**Configuration:**
+- `app_start_mode`: "news" to start in News mode (default: "package")
+- `news_filter_*`: Toggle filters for Arch news, advisories, updates, AUR updates/comments
+- `news_max_age_days`: Maximum age filter (default: unlimited)
+
+**New Files:**
+- `news_feed.json`, `news_content_cache.json`, `news_seen_pkg_updates.json`, `news_seen_aur_comments.json`, `news_recent_searches.json`, `news_bookmarks.json`, `news_read_urls.json`
+- `cache/aur_json/` - Cached AUR package JSON responses for change detection
+- `cache/official_json/` - Cached official package JSON responses for change detection and date fallback
+
+**Technical Highlights:**
+- **Rate Limiting**: Serialized archlinux.org requests (1 at a time) with exponential backoff (2s→4s→8s→16s, max 60s)
+- **Circuit Breaker**: Per-endpoint failure detection prevents cascading failures (404s don't trigger circuit breaker)
+- **Conditional Requests**: ETag/Last-Modified headers for efficient updates
+- **Timeouts**: 15s connect, 30s total for news; 5s for AUR comments; 2s for package dates
+- **Fallback**: Uses `checkupdates` when database sync fails (Landlock restrictions)
+- **UI**: Multi-line keybinds, improved alignment, better menu organization
+- **Code Quality**: Enhanced clippy comments with line counts, reduced complexity via helper functions and type aliases, CodeQL workflow
+- **Refactoring**: Split large files (2981-line feeds.rs, 1731-line news.rs, 1689-line tests.rs, 1448-line title.rs) into modular subdirectories; extracted alert message type detection and formatting into helper functions; changed PathBuf parameters to Path for better flexibility; removed unused _url parameter from fetch_package_page_sync
+- **Documentation**: Added comments explaining intentionally unused parameters; added rustdoc comments to test functions; documented permit dropping consequences in rate_limit_archlinux; documented 500ms debounce delay rationale
+- **i18n**: Added translation keys for config directory alerts (en-US, de-DE, hu-HU); made detection language-agnostic using path pattern matching
+
+## Breaking changes
+None. All changes are backward compatible.
diff --git a/dev/PR/PR_feat-update-improvment.md b/dev/PR/PR_feat-update-improvment.md
deleted file mode 100644
index b702610db..000000000
--- a/dev/PR/PR_feat-update-improvment.md
+++ /dev/null
@@ -1,138 +0,0 @@
-## Summary
-- Changed AUR update command from `-Syu` to `-Sua` to only update AUR packages (official packages already updated by pacman)
-- Added confirmation popup when update command fails but AUR update is pending, allowing users to continue with AUR update anyway
-- Fixed confirmation popup to track and display the actual failed command name (pacman, paru, yay, reflector, etc.) instead of always assuming pacman failed
-- Enhanced error reporting with failure summary and failed commands tracking
-- Improved localization with new messages for AUR update confirmation and error reporting
-- Added comprehensive tests for system update modal functionality
-
-## Type of change
-- [x] feat (new feature)
-- [x] fix (bug fix)
-- [x] test (add/update tests)
-
-## Related issues
-Closes #105
-
-## How to test
-List exact steps and commands to verify the change. Include flags like `--dry-run` when appropriate.
-
-```bash
-# Format and lint
-cargo fmt --all
-cargo clippy --all-targets --all-features -- -D warnings
-
-# Run tests
-cargo test -- --test-threads=1
-cargo test system_update -- --test-threads=1
-
-# Test CLI update with dry-run
-RUST_LOG=pacsea=debug cargo run -- --update --dry-run
-
-# Test TUI update flow
-RUST_LOG=pacsea=debug cargo run -- --dry-run
-# Navigate to system update modal, enable AUR update, and test the confirmation popup
-```
-
-## Checklist
-
-**Code Quality:**
-- [x] Code compiles locally (`cargo check`)
-- [x] `cargo fmt --all` ran without changes
-- [x] `cargo clippy --all-targets --all-features -- -D warnings` is clean
-- [x] `cargo test -- --test-threads=1` passes
-- [x] Complexity checks pass for new code (`cargo test complexity -- --nocapture`)
-- [x] All new functions/methods have rustdoc comments (What, Inputs, Output, Details)
-- [x] No `unwrap()` or `expect()` in non-test code
-
-**Testing:**
-- [x] Added or updated tests where it makes sense
-- [x] For bug fixes: created failing tests first, then fixed the issue
-- [x] Tests are meaningful and cover the functionality
-
-**Documentation:**
-- [x] Updated README if behavior, options, or keybinds changed (keep high-level, reference wiki)
-- [x] Updated relevant wiki pages if needed:
- - [How to use Pacsea](https://github.com/Firstp1ck/Pacsea/wiki/How-to-use-Pacsea)
- - [Configuration](https://github.com/Firstp1ck/Pacsea/wiki/Configuration)
- - [Keyboard Shortcuts](https://github.com/Firstp1ck/Pacsea/wiki/Keyboard-Shortcuts)
-- [ ] Updated config examples in `config/` directory if config keys changed
-- [ ] For UI changes: included screenshots and updated `Images/` if applicable
-
-**Compatibility:**
-- [x] Changes respect `--dry-run` flag
-- [x] Code degrades gracefully if `pacman`/`paru`/`yay` are unavailable
-- [x] No breaking changes (or clearly documented if intentional)
-
-**Other:**
-- [x] Not a packaging change for AUR (otherwise propose in `pacsea-bin` or `pacsea-git` repos)
-
-## Notes for reviewers
-
-### Key Changes:
-
-1. **AUR Update Command Change (`-Syu` → `-Sua`)**:
- - Changed in both CLI (`src/args/update.rs`) and TUI (event loop)
- - `-Sua` only updates AUR packages, avoiding redundant official package updates
- - This is more efficient since pacman already updated official packages
-
-2. **Confirmation Popup for Failed Update Commands**:
- - New modal type: `Modal::ConfirmAurUpdate`
- - Triggered when any update command fails but AUR update is pending
- - Tracks which command actually failed (pacman, reflector, pacman-mirrors, etc.) and displays correct name
- - Allows user to continue with AUR update despite previous command failure
- - Handled in `src/events/modals/handlers.rs`
-
-3. **Enhanced Error Reporting**:
- - Added `failed_commands` tracking in `UpdateState`
- - Added failure summary display in CLI output
- - Better error messages in localization files
-
-4. **Event Loop Improvements**:
- - Better handling of update command failure scenarios
- - Extracts failed command name from command list to show accurate error message
- - Preserves password and header chips for AUR update continuation
- - Improved state management for pending AUR commands
-
-5. **Command Failure Tracking**:
- - Added `failed_command` field to `ExecutorOutput::Finished` enum
- - Extracts command name from failed command string (pacman, paru, yay, reflector, etc.)
- - Added `t_fmt2` function to i18n module for two-parameter string formatting
- - Updated localization strings to support dynamic command names in error messages
-
-6. **Testing**:
- - Comprehensive tests added in `src/events/modals/system_update/tests.rs`
- - Tests cover confirmation popup, command execution, and error scenarios
- - Simplified test assertion logic by removing double negative pattern
- - Updated all `ExecutorOutput::Finished` pattern matches to include `failed_command` field
-
-### Areas to Review:
-- Event loop logic for handling update command failures and AUR update continuation
-- Command name extraction logic for accurate error messages
-- Modal state transitions and password preservation
-- Error message clarity and user experience
-
-## Breaking changes
-None. This is a backward-compatible enhancement.
-
-## Additional context
-
-### Technical Details:
-
-**AUR Update Command Rationale:**
-- `-Syu`: Updates both official and AUR packages (redundant after pacman update)
-- `-Sua`: Updates only AUR packages (more efficient, avoids conflicts)
-
-**Confirmation Popup Flow:**
-1. User initiates system update with AUR enabled
-2. Any update command fails (mirrors, pacman, etc.)
-3. System determines which command failed by checking command list
-4. If AUR update is pending, show confirmation popup with correct failed command name
-5. User can choose to continue (Enter/Y) or cancel (Esc/Q/N)
-6. If continued, AUR update proceeds with preserved password/state
-
-**Command Failure Tracking:**
-- Commands are chained with `&&`, so first failure stops execution
-- System extracts command name from failed command string
-- Supports: pacman, paru, yay, reflector, pacman-mirrors, eos-rankmirrors, cachyos-rate-mirrors
-- Error message dynamically shows which command failed instead of always showing "pacman"
\ No newline at end of file
diff --git a/dev/WORKFLOWS/news_fetching_workflow.md b/dev/WORKFLOWS/news_fetching_workflow.md
new file mode 100644
index 000000000..8d2048fb3
--- /dev/null
+++ b/dev/WORKFLOWS/news_fetching_workflow.md
@@ -0,0 +1,450 @@
+# News Fetching Workflow
+
+This document describes the complete news fetching workflow in Pacsea, including startup news fetch, aggregated news feed fetch, and news content fetching.
+
+## Overview
+
+Pacsea fetches news from multiple sources:
+- **Arch News**: Official Arch Linux news feed from `archlinux.org/news/feed`
+- **Security Advisories**: Security advisories from `security.archlinux.org`
+- **Package Updates**: Updates for installed packages from `archlinux.org/packages/`
+- **AUR Comments**: Comments on AUR packages from `aur.archlinux.org`
+
+The system uses coordination mechanisms to prevent concurrent requests to `archlinux.org` which can cause rate limiting or blocking.
+
+## Main Workflow Diagram
+
+```mermaid
+graph TB
+ Start([App Startup]) --> Init[Initialize Auxiliary Workers]
+    Init --> Check{Startup News Configured?}
+
+ Check -->|No| Skip[Skip News Fetching]
+    Check -->|Yes| CreateChannel[Create Oneshot Channel for Coordination]
+
+ CreateChannel --> StartupWorker[Spawn Startup News Worker]
+ CreateChannel --> AggWorker[Spawn Aggregated Feed Worker]
+
+    StartupWorker --> StartupJitter[Random Jitter 0-500ms]
+ StartupJitter --> StartupFetch[Fetch Startup News Feed]
+
+    AggWorker --> WaitForStartup[Wait for Startup Completion Signal]
+    WaitForStartup --> AggDelay[Additional Delay 500-1500ms]
+ AggDelay --> AggFetch[Fetch Aggregated News Feed]
+
+ StartupFetch --> FilterStartup[Filter by Source/Age/Read]
+ FilterStartup --> SendStartup[Send to News Channel]
+ SendStartup --> SignalComplete[Send Completion Signal]
+
+ SignalComplete -.->|Unblocks| WaitForStartup
+
+ AggFetch --> SendAgg[Send to News Feed Channel]
+
+    SendStartup --> UIUpdate1[Update UI with Startup News]
+    SendAgg --> UIUpdate2[Update UI with Full News Feed]
+
+ UIUpdate1 --> End([End])
+ UIUpdate2 --> End
+ Skip --> End
+```
+
+## Startup News Fetch Workflow
+
+```mermaid
+sequenceDiagram
+ participant App as App Startup
+ participant Worker as Startup News Worker
+ participant Feed as fetch_news_feed
+ participant Arch as archlinux.org
+ participant Security as security.archlinux.org
+ participant AUR as aur.archlinux.org
+ participant Channel as News Channel
+
+ App->>Worker: Spawn with completion_tx
+ Worker->>Worker: Random jitter (0-500ms)
+ Worker->>Worker: Optimize max_age based on last_startup
+ Worker->>Worker: Ensure installed packages set
+
+ Worker->>Feed: fetch_news_feed(ctx)
+
+ Note over Feed: Sequential fetch for archlinux.org sources
+ Feed->>Arch: Fetch Arch News Feed
+ Arch-->>Feed: News items
+ Feed->>Security: Fetch Security Advisories
+ Security-->>Feed: Advisory items
+
+ Note over Feed: Parallel fetch for other sources
+ par Package Updates
+ Feed->>Arch: Fetch package update info
+ Arch-->>Feed: Update items
+ and AUR Comments
+ Feed->>AUR: Fetch AUR comments
+ AUR-->>Feed: Comment items
+ end
+
+ Feed-->>Worker: Combined news items
+ Worker->>Worker: Filter by source preferences
+ Worker->>Worker: Filter by max_age_days
+ Worker->>Worker: Filter unread items
+ Worker->>Channel: Send filtered items
+ Worker->>App: Send completion signal
+ Channel->>App: Update UI with news
+```
+
+## Aggregated News Feed Fetch Workflow
+
+```mermaid
+sequenceDiagram
+ participant App as App Startup
+ participant Worker as Aggregated Feed Worker
+ participant Signal as Completion Signal
+ participant Feed as fetch_news_feed
+ participant Arch as archlinux.org
+ participant Security as security.archlinux.org
+ participant AUR as aur.archlinux.org
+ participant Channel as News Feed Channel
+
+ App->>Worker: Spawn with completion_rx
+ Worker->>Signal: Wait for startup completion
+ Note over Worker,Signal: Blocks until startup fetch completes
+ Signal-->>Worker: Startup fetch completed
+ Worker->>Worker: Additional delay (500-1500ms)
+ Worker->>Worker: Ensure installed packages set
+
+ Worker->>Feed: fetch_news_feed(ctx)
+
+ Note over Feed: Sequential fetch for archlinux.org sources
+ Feed->>Arch: Fetch Arch News Feed
+ Arch-->>Feed: News items
+ Feed->>Security: Fetch Security Advisories
+ Security-->>Feed: Advisory items
+
+ Note over Feed: Parallel fetch for other sources
+ par Package Updates
+ Feed->>Arch: Fetch package update info
+ Arch-->>Feed: Update items
+ and AUR Comments
+ Feed->>AUR: Fetch AUR comments
+ AUR-->>Feed: Comment items
+ end
+
+ Feed-->>Worker: Combined news items
+ Worker->>Channel: Send full feed payload
+ Channel->>App: Update UI with full feed
+```
+
+## News Content Fetching Workflow
+
+```mermaid
+sequenceDiagram
+ participant UI as User Interface
+ participant Event as Event Handler
+ participant Worker as News Content Worker
+ participant Cache as Content Cache
+ participant RateLimit as Rate Limiter
+ participant Arch as archlinux.org
+ participant AUR as aur.archlinux.org
+
+ UI->>Event: User selects news item
+ Event->>Event: Debounce timer (prevents rapid requests)
+ Event->>Worker: Send URL request
+
+    Worker->>Worker: Drain stale requests (keep most recent)
+
+ alt URL is AUR package
+ Worker->>AUR: Fetch AUR comments
+ AUR-->>Worker: Comments HTML
+ Worker->>Worker: Render comments
+ else URL is Arch news/article
+ Worker->>Cache: Check in-memory cache (15min TTL)
+ alt Cache Hit
+ Cache-->>Worker: Cached content
+ else Cache Miss
+ Worker->>Cache: Check disk cache (14day TTL)
+ alt Disk Cache Hit
+ Cache-->>Worker: Cached content
+ Worker->>Cache: Populate in-memory cache
+ else Disk Cache Miss
+ Worker->>RateLimit: Check circuit breaker
+ alt Circuit Breaker Open
+ RateLimit-->>Worker: Error (use stale cache if available)
+ else Circuit Breaker Closed
+ Worker->>RateLimit: Acquire rate limit permit
+ RateLimit-->>Worker: Permit acquired
+ Worker->>Arch: Fetch article content
+ Arch-->>Worker: HTML content
+ Worker->>Worker: Parse and extract content
+ Worker->>Cache: Store in memory cache
+ Worker->>Cache: Store in disk cache
+ end
+ end
+ end
+ end
+
+ Worker->>UI: Send content
+ UI->>UI: Display article/comments
+```
+
+## Coordination Mechanism
+
+The coordination between startup and aggregated news fetches prevents concurrent requests to `archlinux.org`:
+
+```mermaid
+graph LR
+ subgraph "App Startup"
+ A[Create Oneshot Channel] --> B[completion_tx]
+ A --> C[completion_rx]
+ end
+
+ subgraph "Startup News Worker"
+ B --> D[Startup Fetch]
+ D --> E[Filter & Send]
+ E --> F[Send completion signal]
+ end
+
+ subgraph "Aggregated Feed Worker"
+ C --> G[Wait for signal]
+ G --> H[Receive signal]
+ H --> I[Additional delay]
+ I --> J[Aggregated Fetch]
+ end
+
+ F -.->|Unblocks| G
+
+ style F fill:#90EE90
+ style G fill:#FFB6C1
+ style H fill:#90EE90
+```
+
+## Fetch Sources Details
+
+### Arch News Fetch
+
+```mermaid
+graph TB
+ Start[Fetch Arch News] --> CheckCache{Check Disk Cache}
+ CheckCache -->|Cache Hit & Valid| ReturnCache[Return Cached Items]
+ CheckCache -->|Cache Miss or Expired| RateLimit[Apply Rate Limiting]
+    RateLimit --> CircuitBreaker{Circuit Breaker Status}
+ CircuitBreaker -->|Open| UseStale[Use Stale Cache if Available]
+    CircuitBreaker -->|Closed| Fetch[Fetch from archlinux.org/feeds/news/]
+ Fetch --> Parse[Parse RSS Feed]
+ Parse --> Filter[Filter by Date if max_age set]
+ Filter --> Cache[Update Disk Cache]
+ Cache --> Return[Return News Items]
+ ReturnCache --> Return
+ UseStale --> Return
+```
+
+### Package Updates Fetch
+
+```mermaid
+graph TB
+ Start[Fetch Package Updates] --> LoadUpdates[Load available_updates.txt]
+ LoadUpdates --> Scan[Scan Installed Packages]
+ Scan --> CheckVersions{Compare Versions}
+    CheckVersions -->|New Version| FetchDate[Fetch Package Date from archlinux.org]
+ CheckVersions -->|Same Version| Skip[Skip Package]
+ FetchDate --> CheckSeen{Already Seen?}
+ CheckSeen -->|Yes| Skip
+ CheckSeen -->|No| CreateItem[Create Update Item]
+ CreateItem --> AddToList[Add to Updates List]
+ AddToList --> Limit{Reached Limit?}
+ Limit -->|No| Scan
+ Limit -->|Yes| Return[Return Updates]
+ Skip --> Limit
+```
+
+### AUR Comments Fetch
+
+```mermaid
+graph TB
+ Start[Fetch AUR Comments] --> GetAUR[Get Installed AUR Packages]
+ GetAUR --> ForEach[For Each AUR Package]
+ ForEach --> FetchComments[Fetch Comments from AUR API]
+ FetchComments --> ParseComments[Parse Comment Data]
+ ParseComments --> CheckSeen{Comment Already Seen?}
+ CheckSeen -->|Yes| Skip[Skip Comment]
+ CheckSeen -->|No| CreateItem[Create Comment Item]
+ CreateItem --> AddToList[Add to Comments List]
+ AddToList --> Limit{Reached Limit?}
+ Limit -->|No| ForEach
+ Limit -->|Yes| Sort[Sort by Date Desc]
+ Sort --> Return[Return Comments]
+ Skip --> Limit
+```
+
+## Rate Limiting and Circuit Breaker
+
+To prevent overwhelming `archlinux.org` and getting blocked:
+
+```mermaid
+stateDiagram-v2
+ [*] --> Closed: Initial State
+
+ Closed --> Open: Consecutive Failures >= Threshold
+ Open --> HalfOpen: Backoff Timeout Expired
+ HalfOpen --> Closed: Success
+ HalfOpen --> Open: Failure
+
+ note right of Closed
+ Normal operation
+ Requests allowed
+ end note
+
+ note right of Open
+ Blocking requests
+ Using cached data
+ Exponential backoff
+ end note
+
+ note right of HalfOpen
+ Testing connection
+ Single request allowed
+ end note
+```
+
+## Caching Strategy
+
+```mermaid
+graph TB
+    Request[News Content Request] --> MemoryCache{In-Memory Cache 15min TTL}
+ MemoryCache -->|Hit| Return[Return Content]
+    MemoryCache -->|Miss| DiskCache{Disk Cache 14day TTL}
+ DiskCache -->|Hit| PopulateMem[Populate Memory Cache]
+ PopulateMem --> Return
+ DiskCache -->|Miss| Network[Fetch from Network]
+ Network --> Parse[Parse Content]
+ Parse --> StoreMem[Store in Memory Cache]
+ StoreMem --> StoreDisk[Store in Disk Cache]
+ StoreDisk --> Return
+```
+
+## Error Handling
+
+```mermaid
+graph TB
+ Fetch[Fetch Operation] --> Success{Success?}
+ Success -->|Yes| Process[Process Results]
+ Success -->|No| ErrorType{Error Type}
+
+    ErrorType -->|Network Timeout| Retry{Retries Available?}
+ ErrorType -->|Rate Limited| Backoff[Exponential Backoff]
+ ErrorType -->|Circuit Breaker| UseCache[Use Cached Data]
+ ErrorType -->|Parse Error| LogError[Log Error & Continue]
+
+ Retry -->|Yes| RetryFetch[Retry Fetch]
+ RetryFetch --> Fetch
+ Retry -->|No| UseCache
+
+ Backoff --> Wait[Wait Backoff Period]
+ Wait --> Fetch
+
+ UseCache --> Process
+ LogError --> Process
+ Process --> Continue[Continue with Available Data]
+```
+
+## Key Components
+
+### Workers
+
+1. **Startup News Worker** (`spawn_startup_news_worker`)
+ - Fetches news on app startup
+ - Uses startup news preferences
+ - Filters by source, age, and read status
+ - Sends completion signal when done
+
+2. **Aggregated Feed Worker** (`spawn_aggregated_news_feed_worker`)
+ - Fetches full news feed for main UI
+ - Waits for startup fetch to complete
+ - Always fetches all sources (arch news, advisories, updates, comments)
+
+3. **News Content Worker** (`spawn_news_content_worker`)
+ - Fetches individual article/package content on demand
+ - Uses debouncing to prevent rapid requests
+ - Implements caching (memory + disk)
+
+### Coordination
+
+- **Oneshot Channel**: Used to signal completion between startup and aggregated fetches
+- **Random Delays**: Jitter prevents thundering herd problems
+- **Rate Limiting**: Semaphore-based limiting for archlinux.org requests
+- **Circuit Breaker**: Prevents repeated failures from overwhelming the server
+
+### Data Flow
+
+1. **Startup**: App initializes → Workers spawned → Startup fetch begins
+2. **Coordination**: Startup fetch completes → Signal sent → Aggregated fetch unblocks
+3. **Fetching**: Sources fetched sequentially (archlinux.org) or in parallel (others)
+4. **Processing**: Items filtered, sorted, and deduplicated
+5. **Delivery**: Items sent via channels to UI components
+6. **Caching**: Successful fetches cached for future use
+
+## Configuration
+
+News fetching behavior is controlled by settings in `settings.conf`:
+
+- `startup_news_show_arch_news`: Enable/disable Arch news in startup popup
+- `startup_news_show_advisories`: Enable/disable security advisories
+- `startup_news_show_pkg_updates`: Enable/disable package updates
+- `startup_news_show_aur_comments`: Enable/disable AUR comments
+- `startup_news_max_age_days`: Maximum age of news items to show
+- `startup_news_configured`: Whether startup news is configured
+
+## Performance Optimizations
+
+1. **Caching**: Multi-level caching (memory + disk) reduces network requests
+2. **Parallel Fetching**: Non-archlinux.org sources fetched in parallel
+3. **Sequential Fetching**: archlinux.org sources fetched sequentially to prevent blocking
+4. **Debouncing**: User interactions debounced to prevent rapid requests
+5. **Request Draining**: Stale requests discarded, only most recent processed
+6. **Incremental Updates**: Uses last startup timestamp to optimize fetch window
+7. **Circuit Breaker**: Prevents cascading failures during outages
+
+## Troubleshooting
+
+### Issue: Getting blocked by archlinux.org
+
+**Symptoms**: Timeout errors, rate limiting warnings
+
+**Causes**:
+- Concurrent requests to archlinux.org
+- Too many requests in short time
+- Network issues causing retries
+
+**Solutions**:
+- Ensure coordination mechanism is working (check logs for completion signals)
+- Check circuit breaker status
+- Verify rate limiting is active
+- Review backoff delays
+
+### Issue: News not updating
+
+**Symptoms**: Old news items displayed, no new items
+
+**Causes**:
+- Cache not expiring
+- Network failures
+- Filter settings too restrictive
+
+**Solutions**:
+- Clear cache files
+- Check network connectivity
+- Review filter settings (max_age_days, source preferences)
+
+### Issue: Slow news loading
+
+**Symptoms**: Long delays before news appears
+
+**Causes**:
+- Network latency
+- Large number of installed packages
+- Rate limiting delays
+
+**Solutions**:
+- Check network connection
+- Reduce number of sources enabled
+- Review installed package count
+- Check for circuit breaker backoff
+
diff --git a/dev/scripts/check_translation_keys.py b/dev/scripts/check_translation_keys.py
new file mode 100755
index 000000000..5c66b0cb7
--- /dev/null
+++ b/dev/scripts/check_translation_keys.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+"""Check for missing and extra translation keys between locale files.
+
+This script compares a target locale file against the English (en-US) locale file
+and reports:
+- Missing keys (keys in English but not in target) - treated as errors
+- Extra keys (keys in target but not in English) - treated as warnings
+
+Usage:
+    python3 check_translation_keys.py <locale>
+
+Examples:
+ python3 check_translation_keys.py hu-HU
+ python3 check_translation_keys.py de-DE
+"""
+
+import sys
+import yaml
+from pathlib import Path
+
+
+def flatten_dict(d, parent_key='', sep='.'):
+ """Flatten a nested dictionary into dot-notation keys.
+
+ Args:
+ d: Dictionary to flatten
+ parent_key: Parent key prefix (for recursion)
+ sep: Separator for keys (default: '.')
+
+ Returns:
+ Dictionary with flattened keys
+ """
+ items = []
+ for k, v in d.items():
+ new_key = f'{parent_key}{sep}{k}' if parent_key else k
+ if isinstance(v, dict):
+ items.extend(flatten_dict(v, new_key, sep=sep).items())
+ elif isinstance(v, list):
+ items.append((new_key, v))
+ else:
+ items.append((new_key, v))
+ return dict(items)
+
+
+def find_locales_dir():
+ """Find the locales directory.
+
+ Returns:
+ Path to locales directory or None if not found
+ """
+ # Try development location first
+ dev_path = Path(__file__).parent.parent.parent / 'config' / 'locales'
+ if dev_path.exists() and dev_path.is_dir():
+ return dev_path
+
+ # Try installed location
+ installed_path = Path('/usr/share/pacsea/locales')
+ if installed_path.exists() and installed_path.is_dir():
+ return installed_path
+
+ return None
+
+
+def main():
+ """Main function to check for missing translation keys."""
+ if len(sys.argv) < 2:
+        print("Usage: python3 check_translation_keys.py <locale>")
+ print("Example: python3 check_translation_keys.py hu-HU")
+ sys.exit(1)
+
+ target_locale = sys.argv[1]
+
+ # Find locales directory
+ locales_dir = find_locales_dir()
+ if locales_dir is None:
+ print("Error: Could not find locales directory")
+ sys.exit(1)
+
+ en_file = locales_dir / 'en-US.yml'
+ target_file = locales_dir / f'{target_locale}.yml'
+
+ # Check if files exist
+ if not en_file.exists():
+ print(f"Error: English locale file not found: {en_file}")
+ sys.exit(1)
+
+ if not target_file.exists():
+ print(f"Error: Target locale file not found: {target_file}")
+ sys.exit(1)
+
+ # Load English file
+ try:
+ with open(en_file, 'r', encoding='utf-8') as f:
+ en_data = yaml.safe_load(f)
+ except Exception as e:
+ print(f"Error loading English file: {e}")
+ sys.exit(1)
+
+ # Load target locale file
+ try:
+ with open(target_file, 'r', encoding='utf-8') as f:
+ target_data = yaml.safe_load(f)
+ except Exception as e:
+ print(f"Error loading target locale file: {e}")
+ sys.exit(1)
+
+ # Get the app section from both
+ en_app = en_data.get('en-US', {}).get('app', {})
+ target_app = target_data.get(target_locale, {}).get('app', {})
+
+ # Flatten both
+ en_flat = flatten_dict(en_app)
+ target_flat = flatten_dict(target_app)
+
+ # Find missing keys in target (keys in English but not in target)
+ missing = []
+ for key in sorted(en_flat.keys()):
+ if key not in target_flat:
+ missing.append(key)
+
+ # Find extra keys in target (keys in target but not in English)
+ extra = []
+ for key in sorted(target_flat.keys()):
+ if key not in en_flat:
+ extra.append(key)
+
+ # Report results
+ print(f"Comparing {target_locale}.yml against en-US.yml")
+ print(f"English keys: {len(en_flat)}")
+ print(f"{target_locale} keys: {len(target_flat)}")
+ print()
+
+ has_errors = False
+ has_warnings = False
+
+ # Report missing keys (errors)
+ if missing:
+ has_errors = True
+ print(f"✗ ERROR: Found {len(missing)} missing keys in {target_locale}.yml:")
+ for key in missing:
+ print(f" - app.{key}")
+ print()
+
+ # Report extra keys (warnings)
+ if extra:
+ has_warnings = True
+ print(f"⚠ WARNING: Found {len(extra)} extra keys in {target_locale}.yml (not in en-US.yml):")
+ for key in extra:
+ print(f" - app.{key}")
+ # Show the value to help identify what it is
+ print(f" Value: {target_flat[key]}")
+ print()
+
+ # Exit with appropriate code
+ if has_errors:
+ print("✗ Translation check FAILED: Missing keys found")
+ sys.exit(1)
+ elif has_warnings:
+ print("⚠ Translation check PASSED with warnings: Extra keys found (may be duplicates or obsolete)")
+ sys.exit(0)
+ else:
+ print("✓ Translation check PASSED: All keys match!")
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/dev/scripts/release.fish b/dev/scripts/release.fish
index 17c07028f..4134d08f1 100755
--- a/dev/scripts/release.fish
+++ b/dev/scripts/release.fish
@@ -599,14 +599,24 @@ function phase4_build_release
end
end
- # Step 4.7: Publish to crates.io
+ # Step 4.7: Verify crates.io publish (dry-run)
+ log_step "Verifying crates.io publish (dry-run)"
+
+ dry_run_cmd "cargo publish --dry-run"
+ if test $status -ne 0
+ log_error "cargo publish --dry-run failed"
+ return 1
+ end
+ log_success "crates.io publish verification passed"
+
+ # Step 4.8: Publish to crates.io
log_step "Publishing to crates.io"
if test "$DRY_RUN" = true
log_info "[DRY-RUN] Would run 'cargo publish' to publish to crates.io"
else
cd "$PACSEA_DIR"
- dry_run_cmd "cargo publish"
+ cargo publish
if test $status -eq 0
log_success "Published to crates.io"
diff --git a/dev/scripts/test_archlinux.org.sh b/dev/scripts/test_archlinux.org.sh
new file mode 100755
index 000000000..b55bd45f7
--- /dev/null
+++ b/dev/scripts/test_archlinux.org.sh
@@ -0,0 +1,218 @@
+#! /usr/bin/env bash
+# test_archlinux_endpoints.sh
+
+echo "Testing archlinux.org endpoints..."
+
+# Timeout configuration (in seconds)
+CONNECT_TIMEOUT=10
+MAX_TIME=30
+
+# Initialize evaluation arrays
+declare -a test_names
+declare -a http_statuses
+declare -a response_times
+declare -a parse_results
+declare -a issues
+
+# Test 1: News Feed
+echo -e "\n1. News Feed:"
+NEWS_BODY=$(curl -s --connect-timeout $CONNECT_TIMEOUT --max-time $MAX_TIME "https://archlinux.org/feeds/news/")
+NEWS_CURL_EXIT=$?
+NEWS_STATS=$(curl -s --connect-timeout $CONNECT_TIMEOUT --max-time $MAX_TIME -w "%{http_code}|%{time_total}" \
+ "https://archlinux.org/feeds/news/" -o /dev/null)
+NEWS_STATS_EXIT=$?
+NEWS_HTTP=$(echo "$NEWS_STATS" | cut -d'|' -f1)
+NEWS_TIME=$(echo "$NEWS_STATS" | cut -d'|' -f2)
+
+# Exit on timeout or connection failure
+# Exit codes: 28=timeout, 6=couldn't resolve host, 7=failed to connect
+if [ $NEWS_CURL_EXIT -eq 28 ] || [ $NEWS_CURL_EXIT -eq 6 ] || [ $NEWS_CURL_EXIT -eq 7 ] || \
+ [ $NEWS_STATS_EXIT -eq 28 ] || [ $NEWS_STATS_EXIT -eq 6 ] || [ $NEWS_STATS_EXIT -eq 7 ] || \
+ [ -z "$NEWS_HTTP" ] || [ "$NEWS_HTTP" = "000" ]; then
+ echo "❌ Connection timeout or failure after ${CONNECT_TIMEOUT}s"
+ echo "Exiting script."
+ exit 1
+fi
+
+echo "$NEWS_BODY" | head -20
+echo "HTTP Status: $NEWS_HTTP | Time: ${NEWS_TIME}s"
+
+test_names+=("News Feed")
+http_statuses+=("$NEWS_HTTP")
+response_times+=("$NEWS_TIME")
+if echo "$NEWS_BODY" | grep -q "<rss"; then
+    parse_results+=("OK")
+else
+    parse_results+=("FAIL")
+    issues+=("News Feed: Response does not look like an RSS/XML feed")
+fi
+
+# Test 2: Package JSON
+echo -e "\n2. Package JSON:"
+PKG_BODY=$(curl -s --connect-timeout $CONNECT_TIMEOUT --max-time $MAX_TIME "https://archlinux.org/packages/core/x86_64/pacman/json/")
+PKG_CURL_EXIT=$?
+PKG_STATS=$(curl -s --connect-timeout $CONNECT_TIMEOUT --max-time $MAX_TIME -w "%{http_code}|%{time_total}" \
+    "https://archlinux.org/packages/core/x86_64/pacman/json/" -o /dev/null)
+PKG_STATS_EXIT=$?
+PKG_HTTP=$(echo "$PKG_STATS" | cut -d'|' -f1)
+PKG_TIME=$(echo "$PKG_STATS" | cut -d'|' -f2)
+
+# Exit on timeout or connection failure
+# Exit codes: 28=timeout, 6=couldn't resolve host, 7=failed to connect
+if [ $PKG_CURL_EXIT -eq 28 ] || [ $PKG_CURL_EXIT -eq 6 ] || [ $PKG_CURL_EXIT -eq 7 ] || \
+   [ $PKG_STATS_EXIT -eq 28 ] || [ $PKG_STATS_EXIT -eq 6 ] || [ $PKG_STATS_EXIT -eq 7 ] || \
+   [ -z "$PKG_HTTP" ] || [ "$PKG_HTTP" = "000" ]; then
+    echo "❌ Connection timeout or failure after ${CONNECT_TIMEOUT}s"
+    echo "Exiting script."
+    exit 1
+fi
+
+PKG_NAME=$(echo "$PKG_BODY" | jq -r '.pkgname // .pkg.pkgname // .pkg.name // "unknown"' 2>/dev/null)
+PKG_VER=$(echo "$PKG_BODY" | jq -r '.pkg.pkgver // .pkg.version // .pkgver // "unknown"' 2>/dev/null)
+echo "$PKG_NAME $PKG_VER"
+echo "HTTP Status: $PKG_HTTP | Time: ${PKG_TIME}s"
+
+test_names+=("Package JSON")
+http_statuses+=("$PKG_HTTP")
+response_times+=("$PKG_TIME")
+# Check if JSON is valid and has either pkgname or .pkg.pkgname or .pkg.name
+if echo "$PKG_BODY" | jq -e '.pkgname // .pkg.pkgname // .pkg.name' >/dev/null 2>&1; then
+ parse_results+=("OK")
+else
+ parse_results+=("FAIL")
+ issues+=("Package JSON: Invalid JSON or missing package name field")
+fi
+
+# Test 3: Package Search
+echo -e "\n3. Package Search:"
+SEARCH_BODY=$(curl -s --connect-timeout $CONNECT_TIMEOUT --max-time $MAX_TIME "https://archlinux.org/packages/search/json/?repo=core&arch=x86_64&limit=5&page=1")
+SEARCH_CURL_EXIT=$?
+SEARCH_STATS=$(curl -s --connect-timeout $CONNECT_TIMEOUT --max-time $MAX_TIME -w "%{http_code}|%{time_total}" \
+ "https://archlinux.org/packages/search/json/?repo=core&arch=x86_64&limit=5&page=1" -o /dev/null)
+SEARCH_STATS_EXIT=$?
+SEARCH_HTTP=$(echo "$SEARCH_STATS" | cut -d'|' -f1)
+SEARCH_TIME=$(echo "$SEARCH_STATS" | cut -d'|' -f2)
+
+# Exit on timeout or connection failure
+# Exit codes: 28=timeout, 6=couldn't resolve host, 7=failed to connect
+if [ $SEARCH_CURL_EXIT -eq 28 ] || [ $SEARCH_CURL_EXIT -eq 6 ] || [ $SEARCH_CURL_EXIT -eq 7 ] || \
+ [ $SEARCH_STATS_EXIT -eq 28 ] || [ $SEARCH_STATS_EXIT -eq 6 ] || [ $SEARCH_STATS_EXIT -eq 7 ] || \
+ [ -z "$SEARCH_HTTP" ] || [ "$SEARCH_HTTP" = "000" ]; then
+ echo "❌ Connection timeout or failure after ${CONNECT_TIMEOUT}s"
+ echo "Exiting script."
+ exit 1
+fi
+
+echo "$SEARCH_BODY" | jq '.results | length' 2>/dev/null
+echo "HTTP Status: $SEARCH_HTTP | Time: ${SEARCH_TIME}s"
+
+test_names+=("Package Search")
+http_statuses+=("$SEARCH_HTTP")
+response_times+=("$SEARCH_TIME")
+if echo "$SEARCH_BODY" | jq -e '.results' >/dev/null 2>&1; then
+ parse_results+=("OK")
+else
+ parse_results+=("FAIL")
+ issues+=("Package Search: Invalid JSON or missing .results")
+fi
+
+# Test 4: Mirror Status
+echo -e "\n4. Mirror Status:"
+MIRROR_BODY=$(curl -s --connect-timeout $CONNECT_TIMEOUT --max-time $MAX_TIME "https://archlinux.org/mirrors/status/json/")
+MIRROR_CURL_EXIT=$?
+MIRROR_STATS=$(curl -s --connect-timeout $CONNECT_TIMEOUT --max-time $MAX_TIME -w "%{http_code}|%{time_total}" \
+ "https://archlinux.org/mirrors/status/json/" -o /dev/null)
+MIRROR_STATS_EXIT=$?
+MIRROR_HTTP=$(echo "$MIRROR_STATS" | cut -d'|' -f1)
+MIRROR_TIME=$(echo "$MIRROR_STATS" | cut -d'|' -f2)
+
+# Exit on timeout or connection failure
+# Exit codes: 28=timeout, 6=couldn't resolve host, 7=failed to connect
+if [ $MIRROR_CURL_EXIT -eq 28 ] || [ $MIRROR_CURL_EXIT -eq 6 ] || [ $MIRROR_CURL_EXIT -eq 7 ] || \
+ [ $MIRROR_STATS_EXIT -eq 28 ] || [ $MIRROR_STATS_EXIT -eq 6 ] || [ $MIRROR_STATS_EXIT -eq 7 ] || \
+ [ -z "$MIRROR_HTTP" ] || [ "$MIRROR_HTTP" = "000" ]; then
+ echo "❌ Connection timeout or failure after ${CONNECT_TIMEOUT}s"
+ echo "Exiting script."
+ exit 1
+fi
+
+echo "$MIRROR_BODY" | jq 'keys | length' 2>/dev/null
+echo "HTTP Status: $MIRROR_HTTP | Time: ${MIRROR_TIME}s"
+
+test_names+=("Mirror Status")
+http_statuses+=("$MIRROR_HTTP")
+response_times+=("$MIRROR_TIME")
+if echo "$MIRROR_BODY" | jq -e 'keys' >/dev/null 2>&1; then
+ parse_results+=("OK")
+else
+ parse_results+=("FAIL")
+ issues+=("Mirror Status: Invalid JSON or missing keys")
+fi
+
+# Evaluation
+echo -e "\n""$(printf '=%.0s' {1..60})"
+echo "EVALUATION SUMMARY"
+echo -e "$(printf '=%.0s' {1..60})"
+
+all_good=true
+max_time=5.0
+
+for i in "${!test_names[@]}"; do
+ name="${test_names[$i]}"
+ http="${http_statuses[$i]}"
+ time="${response_times[$i]}"
+ parse="${parse_results[$i]}"
+
+ # Check HTTP status
+ if [ -z "$http" ] || [ "$http" = "000" ] || [ "$http" != "200" ]; then
+ all_good=false
+ if [ -z "$http" ] || [ "$http" = "000" ]; then
+ issues+=("$name: Connection failed or timed out")
+ else
+ issues+=("$name: HTTP status $http (expected 200)")
+ fi
+ fi
+
+ # Check response time (using awk for float comparison)
+ # awk returns 0 (success) if condition is true, 1 if false
+ if [ -z "$time" ]; then
+ all_good=false
+ issues+=("$name: Failed to measure response time")
+ elif awk "BEGIN {if ($time > $max_time) exit 0; else exit 1}"; then
+ all_good=false
+ issues+=("$name: Slow response time ${time}s (threshold: ${max_time}s)")
+ fi
+
+ # Check parse result
+ if [ "$parse" != "OK" ]; then
+ all_good=false
+ fi
+done
+
+if [ "$all_good" = true ]; then
+ echo -e "\n✅ ALL TESTS PASSED"
+ echo "All endpoints are responding correctly:"
+ for i in "${!test_names[@]}"; do
+ echo " • ${test_names[$i]}: HTTP ${http_statuses[$i]}, ${response_times[$i]}s, Parse ${parse_results[$i]}"
+ done
+else
+ echo -e "\n❌ ISSUES DETECTED"
+ echo ""
+ echo "Problems found:"
+ for issue in "${issues[@]}"; do
+ echo " • $issue"
+ done
+ echo ""
+ echo "Test details:"
+ for i in "${!test_names[@]}"; do
+ status_icon="✅"
+ time_check=$(awk "BEGIN {if (${response_times[$i]} > $max_time) exit 0; else exit 1}" && echo "slow" || echo "ok")
+ if [ "${http_statuses[$i]}" != "200" ] || [ "${parse_results[$i]}" != "OK" ] || [ "$time_check" = "slow" ]; then
+ status_icon="❌"
+ fi
+ echo " $status_icon ${test_names[$i]}: HTTP ${http_statuses[$i]}, ${response_times[$i]}s, Parse ${parse_results[$i]}"
+ done
+fi
+
+echo -e "$(printf '=%.0s' {1..60})"
\ No newline at end of file
diff --git a/pacsea.code-workspace b/pacsea.code-workspace
new file mode 100644
index 000000000..9888f4548
--- /dev/null
+++ b/pacsea.code-workspace
@@ -0,0 +1,126 @@
+{
+ // Workspace folders - defines which directories are included in this workspace
+ "folders": [
+ {
+ "path": "."
+ }
+ ],
+ // Workspace-specific settings that apply when this workspace is opened
+ "settings": {
+ // Files and folders to hide from the file explorer (but still accessible via search)
+ "files.exclude": {
+ "**/target": true, // Rust build output directory
+ "**/debug": true, // Rust debug build directory
+ "**/*.rs.bk": true, // Rustfmt backup files
+ "**/*.pdb": true, // Windows debug symbols
+ ".claude": true, // Claude AI cache directory
+ ".cache": true, // Cache directories
+ "**/.cache": true, // All cache directories recursively
+ "**/__pycache__": true, // Python cache directories
+ "**/*.pyc": true, // Python compiled bytecode
+ "AUR_BUILD": true, // AUR build artifacts
+ "repository": true, // Repository cache directory
+ ".jj": true, // Jujutsu version control directory
+ "exported-assets": true, // Exported assets directory
+ "semgrep-bin": true, // Semgrep binary directory
+ "**/*.lnk": true, // Windows shortcut files
+ "**/details_cache.json": true, // Pacsea package details cache
+ "**/recent_searches.json": true, // Pacsea recent searches cache
+ "**/official_index.json": true, // Pacsea official package index cache
+ "**/install_list.json": true, // Pacsea install list cache
+ "**/install_log.txt": true, // Pacsea install log files
+ },
+ // Files and folders to exclude from search results
+ "search.exclude": {
+ "**/target": true, // Rust build output directory
+ "**/debug": true, // Rust debug build directory
+ "**/*.rs.bk": true, // Rustfmt backup files
+ ".claude": true, // Claude AI cache directory
+ ".cache": true, // Cache directories
+ "**/.cache": true, // All cache directories recursively
+ "**/__pycache__": true, // Python cache directories
+ "**/*.pyc": true, // Python compiled bytecode
+ "AUR_BUILD": true, // AUR build artifacts
+ "repository": true, // Repository cache directory
+ ".jj": true, // Jujutsu version control directory
+ "exported-assets": true, // Exported assets directory
+ "semgrep-bin": true, // Semgrep binary directory
+ "**/*.lnk": true, // Windows shortcut files
+ "**/details_cache.json": true, // Pacsea package details cache
+ "**/recent_searches.json": true, // Pacsea recent searches cache
+ "**/official_index.json": true, // Pacsea official package index cache
+ "**/install_list.json": true, // Pacsea install list cache
+ "**/install_log.txt": true, // Pacsea install log files
+ },
+ // Files and folders to exclude from file system watcher (improves performance)
+ "files.watcherExclude": {
+ "**/target/**": true, // Rust build output (recursive)
+ "**/debug/**": true, // Rust debug builds (recursive)
+ "**/.cache/**": true, // Cache directories (recursive)
+ "**/__pycache__/**": true, // Python cache (recursive)
+ "AUR_BUILD/**": true, // AUR build artifacts (recursive)
+ "repository/**": true, // Repository cache (recursive)
+ ".jj/**": true, // Jujutsu VCS (recursive)
+ "exported-assets/**": true // Exported assets (recursive)
+ },
+ // Automatically format code when saving files
+ "editor.formatOnSave": true,
+ // Show a vertical ruler at column 100 (matches rustfmt max_width)
+ "editor.rulers": [
+ 100
+ ],
+ // Rust-specific editor settings
+ "[rust]": {
+ "editor.defaultFormatter": "rust-lang.rust-analyzer", // Use rust-analyzer for formatting
+ "editor.tabSize": 4, // Tab size matches rustfmt.toml
+ "editor.insertSpaces": true // Use spaces instead of tabs
+ },
+ // TOML-specific editor settings
+ "[toml]": {
+ "editor.defaultFormatter": "tamasfe.even-better-toml" // Use even-better-toml for TOML files
+ },
+ // YAML-specific editor settings
+ "[yaml]": {
+ "editor.defaultFormatter": "redhat.vscode-yaml" // Use YAML extension for YAML files
+ },
+ // Rust-analyzer: Use clippy instead of cargo check for diagnostics
+ "rust-analyzer.check.command": "clippy",
+ // Rust-analyzer: Check all targets (lib, bins, tests, examples, benches)
+ "rust-analyzer.check.allTargets": true,
+ // Rust-analyzer: Check with all features enabled
+ "rust-analyzer.check.features": "all",
+ // Rust-analyzer: Additional arguments passed to clippy (deny warnings)
+ "rust-analyzer.check.extraArgs": [
+ "--",
+ "-D",
+ "warnings"
+ ],
+ // Rust-analyzer: Enable all Cargo features for better code analysis
+ "rust-analyzer.cargo.allFeatures": true,
+ // Rust-analyzer: Show type hints inline (e.g., variable types)
+ "rust-analyzer.inlayHints.typeHints.enable": true,
+ // Rust-analyzer: Show parameter names in function calls
+ "rust-analyzer.inlayHints.parameterHints.enable": true,
+ // Rust-analyzer: Show hints for method chaining
+ "rust-analyzer.inlayHints.chainingHints.enable": true,
+ // Rust-analyzer: Show lifetime elision hints (skip trivial cases)
+ "rust-analyzer.inlayHints.lifetimeElisionHints.enable": "skip_trivial",
+ // Rust-analyzer: Automatically add imports when completing code
+ "rust-analyzer.completion.autoimport.enable": true,
+ // File type associations for syntax highlighting
+ "files.associations": {
+ "*.conf": "properties", // Treat .conf files as properties files
+ "*.yml": "yaml" // Treat .yml files as YAML
+ }
+ },
+ // Recommended VS Code extensions for this workspace
+ "extensions": {
+ "recommendations": [
+ "rust-lang.rust-analyzer", // Official Rust language server and formatter
+ "tamasfe.even-better-toml", // Enhanced TOML support for Cargo.toml and config files
+ "serayuzgur.crates", // Dependency management for Cargo.toml
+ "redhat.vscode-yaml", // YAML support for i18n locale files
+ "vadimcn.vscode-lldb" // LLDB debugger for Rust debugging
+ ]
+ }
+}
\ No newline at end of file
diff --git a/src/app/mod.rs b/src/app/mod.rs
index 2c26b7c87..4fac58f88 100644
--- a/src/app/mod.rs
+++ b/src/app/mod.rs
@@ -3,13 +3,19 @@
//! This module organizes the TUI runtime into smaller files to improve
//! maintainability and keep individual files under 500 lines.
+/// Dependency cache for storing resolved dependency information.
mod deps_cache;
+/// File cache for storing package file information.
mod files_cache;
+/// Persistence layer for saving and loading application state.
mod persist;
+/// Recent queries and history management.
mod recent;
+/// Runtime event loop and background workers.
mod runtime;
pub mod sandbox_cache;
pub mod services_cache;
+/// Terminal setup and restoration utilities.
mod terminal;
// Re-export the public entrypoint so callers keep using `app::run(...)`.
diff --git a/src/app/persist.rs b/src/app/persist.rs
index 7e0cd8de0..4606ae1e5 100644
--- a/src/app/persist.rs
+++ b/src/app/persist.rs
@@ -80,6 +80,113 @@ pub fn maybe_flush_recent(app: &mut AppState) {
}
}
+/// What: Persist the news search history to disk if marked dirty.
+///
+/// Inputs:
+/// - `app`: Application state containing `news_recent` and `news_recent_path`
+pub fn maybe_flush_news_recent(app: &mut AppState) {
+ if !app.news_recent_dirty {
+ return;
+ }
+ let values: Vec<String> = app.news_recent.iter().map(|(_, v)| v.clone()).collect();
+ if let Ok(s) = serde_json::to_string(&values) {
+ tracing::debug!(
+ path = %app.news_recent_path.display(),
+ bytes = s.len(),
+ "[Persist] Writing news recent searches to disk"
+ );
+ match fs::write(&app.news_recent_path, &s) {
+ Ok(()) => {
+ tracing::debug!(
+ path = %app.news_recent_path.display(),
+ "[Persist] News recent searches persisted"
+ );
+ }
+ Err(e) => {
+ tracing::warn!(
+ path = %app.news_recent_path.display(),
+ error = %e,
+ "[Persist] Failed to write news recent searches"
+ );
+ }
+ }
+ app.news_recent_dirty = false;
+ }
+}
+
+/// What: Persist news bookmarks to disk if marked dirty.
+///
+/// Inputs:
+/// - `app`: Application state containing `news_bookmarks` and `news_bookmarks_path`
+pub fn maybe_flush_news_bookmarks(app: &mut AppState) {
+ if !app.news_bookmarks_dirty {
+ return;
+ }
+ if let Ok(s) = serde_json::to_string(&app.news_bookmarks) {
+ tracing::debug!(
+ path = %app.news_bookmarks_path.display(),
+ bytes = s.len(),
+ "[Persist] Writing news bookmarks to disk"
+ );
+ match fs::write(&app.news_bookmarks_path, &s) {
+ Ok(()) => {
+ tracing::debug!(
+ path = %app.news_bookmarks_path.display(),
+ "[Persist] News bookmarks persisted"
+ );
+ }
+ Err(e) => {
+ tracing::warn!(
+ path = %app.news_bookmarks_path.display(),
+ error = %e,
+ "[Persist] Failed to write news bookmarks"
+ );
+ }
+ }
+ app.news_bookmarks_dirty = false;
+ }
+}
+
+/// What: Persist the news article content cache to disk if marked dirty.
+///
+/// Inputs:
+/// - `app`: Application state containing `news_content_cache` and `news_content_cache_path`
+///
+/// Output:
+/// - Writes `news_content_cache` JSON to `news_content_cache_path` and clears the dirty flag on success.
+///
+/// Details:
+/// - Caches article content (URL -> content string) to avoid re-fetching on restart.
+pub fn maybe_flush_news_content_cache(app: &mut AppState) {
+ if !app.news_content_cache_dirty {
+ return;
+ }
+ if let Ok(s) = serde_json::to_string(&app.news_content_cache) {
+ tracing::debug!(
+ path = %app.news_content_cache_path.display(),
+ bytes = s.len(),
+ entries = app.news_content_cache.len(),
+ "[Persist] Writing news content cache to disk"
+ );
+ match fs::write(&app.news_content_cache_path, &s) {
+ Ok(()) => {
+ tracing::debug!(
+ path = %app.news_content_cache_path.display(),
+ "[Persist] News content cache persisted"
+ );
+ }
+ Err(e) => {
+ tracing::warn!(
+ path = %app.news_content_cache_path.display(),
+ error = %e,
+ "[Persist] Failed to write news content cache"
+ );
+ }
+ }
+ app.news_content_cache_dirty = false;
+ }
+}
+
/// What: Persist the set of read Arch news URLs to disk if marked dirty.
///
/// Inputs:
@@ -116,6 +223,120 @@ pub fn maybe_flush_news_read(app: &mut AppState) {
}
}
+/// What: Persist the set of read news IDs to disk if marked dirty.
+///
+/// Inputs:
+/// - `app`: Application state containing `news_read_ids` and `news_read_ids_path`
+///
+/// Output:
+/// - Writes `news_read_ids` JSON to `news_read_ids_path` and clears the dirty flag on success.
+pub fn maybe_flush_news_read_ids(app: &mut AppState) {
+ if !app.news_read_ids_dirty {
+ return;
+ }
+ if let Ok(s) = serde_json::to_string(&app.news_read_ids) {
+ tracing::debug!(
+ path = %app.news_read_ids_path.display(),
+ bytes = s.len(),
+ "[Persist] Writing news read IDs to disk"
+ );
+ match fs::write(&app.news_read_ids_path, &s) {
+ Ok(()) => {
+ tracing::debug!(
+ path = %app.news_read_ids_path.display(),
+ "[Persist] News read IDs persisted"
+ );
+ }
+ Err(e) => {
+ tracing::warn!(
+ path = %app.news_read_ids_path.display(),
+ error = %e,
+ "[Persist] Failed to write news read IDs"
+ );
+ }
+ }
+ app.news_read_ids_dirty = false;
+ }
+}
+
+/// What: Persist last-seen package versions for news updates if marked dirty.
+///
+/// Inputs:
+/// - `app`: Application state containing `news_seen_pkg_versions` and its path.
+///
+/// Output:
+/// - Writes JSON file when dirty, clears the dirty flag on success.
+///
+/// Details:
+/// - No-op when dirty flag is false; logs success/failure.
+pub fn maybe_flush_news_seen_versions(app: &mut AppState) {
+ if !app.news_seen_pkg_versions_dirty {
+ return;
+ }
+ if let Ok(s) = serde_json::to_string(&app.news_seen_pkg_versions) {
+ tracing::debug!(
+ path = %app.news_seen_pkg_versions_path.display(),
+ bytes = s.len(),
+ "[Persist] Writing news seen package versions to disk"
+ );
+ match fs::write(&app.news_seen_pkg_versions_path, &s) {
+ Ok(()) => {
+ tracing::debug!(
+ path = %app.news_seen_pkg_versions_path.display(),
+ "[Persist] News seen package versions persisted"
+ );
+ }
+ Err(e) => {
+ tracing::warn!(
+ path = %app.news_seen_pkg_versions_path.display(),
+ error = %e,
+ "[Persist] Failed to write news seen package versions"
+ );
+ }
+ }
+ app.news_seen_pkg_versions_dirty = false;
+ }
+}
+
+/// What: Persist last-seen AUR comments if marked dirty.
+///
+/// Inputs:
+/// - `app`: Application state containing `news_seen_aur_comments` and its path.
+///
+/// Output:
+/// - Writes JSON file when dirty, clears the dirty flag on success.
+///
+/// Details:
+/// - No-op when dirty flag is false; logs success/failure.
+pub fn maybe_flush_news_seen_aur_comments(app: &mut AppState) {
+ if !app.news_seen_aur_comments_dirty {
+ return;
+ }
+ if let Ok(s) = serde_json::to_string(&app.news_seen_aur_comments) {
+ tracing::debug!(
+ path = %app.news_seen_aur_comments_path.display(),
+ bytes = s.len(),
+ "[Persist] Writing news seen AUR comments to disk"
+ );
+ match fs::write(&app.news_seen_aur_comments_path, &s) {
+ Ok(()) => {
+ tracing::debug!(
+ path = %app.news_seen_aur_comments_path.display(),
+ "[Persist] News seen AUR comments persisted"
+ );
+ }
+ Err(e) => {
+ tracing::warn!(
+ path = %app.news_seen_aur_comments_path.display(),
+ error = %e,
+ "[Persist] Failed to write news seen AUR comments"
+ );
+ }
+ }
+ app.news_seen_aur_comments_dirty = false;
+ }
+}
+
/// What: Persist the announcement read IDs to disk if marked dirty.
///
/// Inputs:
@@ -162,21 +383,17 @@ pub fn maybe_flush_announcement_read(app: &mut AppState) {
///
/// Output:
/// - Writes dependency cache JSON to `deps_cache_path` and clears dirty flag on success.
-/// - If install list is empty, removes the cache file.
+/// - If install list is empty, ensures an empty cache file exists instead of deleting it.
pub fn maybe_flush_deps_cache(app: &mut AppState) {
if app.install_list.is_empty() {
- // Clear cache file if install list is empty
- if let Err(e) = fs::remove_file(&app.deps_cache_path) {
- tracing::debug!(
- path = %app.deps_cache_path.display(),
- error = %e,
- "[Persist] Failed to remove dependency cache (may not exist)"
- );
- } else {
+ // Write an empty cache file when nothing is queued to keep the path present.
+ if app.deps_cache_dirty || !app.deps_cache_path.exists() {
+ let empty_signature: Vec<String> = Vec::new();
tracing::debug!(
path = %app.deps_cache_path.display(),
- "[Persist] Removed dependency cache because install list is empty"
+ "[Persist] Writing empty dependency cache because install list is empty"
);
+ deps_cache::save_cache(&app.deps_cache_path, &empty_signature, &[]);
}
app.deps_cache_dirty = false;
return;
@@ -207,21 +424,17 @@ pub fn maybe_flush_deps_cache(app: &mut AppState) {
///
/// Output:
/// - Writes file cache JSON to `files_cache_path` and clears dirty flag on success.
-/// - If install list is empty, removes the cache file.
+/// - If install list is empty, ensures an empty cache file exists instead of deleting it.
pub fn maybe_flush_files_cache(app: &mut AppState) {
if app.install_list.is_empty() {
- // Clear cache file if install list is empty
- if let Err(e) = fs::remove_file(&app.files_cache_path) {
+ // Write an empty cache file when nothing is queued to keep the path present.
+ if app.files_cache_dirty || !app.files_cache_path.exists() {
+ let empty_signature: Vec<String> = Vec::new();
tracing::debug!(
path = %app.files_cache_path.display(),
- error = %e,
- "[Persist] Failed to remove file cache (may not exist)"
- );
- } else {
- tracing::debug!(
- path = %app.files_cache_path.display(),
- "[Persist] Removed file cache because install list is empty"
+ "[Persist] Writing empty file cache because install list is empty"
);
+ files_cache::save_cache(&app.files_cache_path, &empty_signature, &[]);
}
app.files_cache_dirty = false;
return;
@@ -254,21 +467,17 @@ pub fn maybe_flush_files_cache(app: &mut AppState) {
///
/// Output:
/// - Writes service cache JSON to `services_cache_path` and clears dirty flag on success.
-/// - If install list is empty, removes the cache file.
+/// - If install list is empty, ensures an empty cache file exists instead of deleting it.
pub fn maybe_flush_services_cache(app: &mut AppState) {
if app.install_list.is_empty() {
- // Clear cache file if install list is empty
- if let Err(e) = fs::remove_file(&app.services_cache_path) {
- tracing::debug!(
- path = %app.services_cache_path.display(),
- error = %e,
- "[Persist] Failed to remove service cache (may not exist)"
- );
- } else {
+ // Write an empty cache file when nothing is queued to keep the path present.
+ if app.services_cache_dirty || !app.services_cache_path.exists() {
+ let empty_signature: Vec<String> = Vec::new();
tracing::debug!(
path = %app.services_cache_path.display(),
- "[Persist] Removed service cache because install list is empty"
+ "[Persist] Writing empty service cache because install list is empty"
);
+ services_cache::save_cache(&app.services_cache_path, &empty_signature, &[]);
}
app.services_cache_dirty = false;
return;
@@ -303,21 +512,17 @@ pub fn maybe_flush_services_cache(app: &mut AppState) {
///
/// Output:
/// - Writes sandbox cache JSON to `sandbox_cache_path` and clears dirty flag on success.
-/// - If install list is empty, removes the cache file.
+/// - If install list is empty, ensures an empty cache file exists instead of deleting it.
pub fn maybe_flush_sandbox_cache(app: &mut AppState) {
if app.install_list.is_empty() {
- // Clear cache file if install list is empty
- if let Err(e) = fs::remove_file(&app.sandbox_cache_path) {
- tracing::debug!(
- path = %app.sandbox_cache_path.display(),
- error = %e,
- "[Persist] Failed to remove sandbox cache (may not exist)"
- );
- } else {
+ // Write an empty cache file when nothing is queued to keep the path present.
+ if app.sandbox_cache_dirty || !app.sandbox_cache_path.exists() {
+ let empty_signature: Vec<String> = Vec::new();
tracing::debug!(
path = %app.sandbox_cache_path.display(),
- "[Persist] Removed sandbox cache because install list is empty"
+ "[Persist] Writing empty sandbox cache because install list is empty"
);
+ sandbox_cache::save_cache(&app.sandbox_cache_path, &empty_signature, &[]);
}
app.sandbox_cache_dirty = false;
return;
@@ -592,17 +797,17 @@ mod tests {
}
#[test]
- /// What: Ensure `maybe_flush_deps_cache` deletes the cache file when the install list is empty.
+ /// What: Ensure `maybe_flush_deps_cache` writes an empty cache file when the install list is empty.
///
/// Inputs:
/// - `AppState` with an empty install list, existing cache file, and `deps_cache_dirty = true`.
///
/// Output:
- /// - Cache file is removed and the dirty flag is cleared.
+ /// - Cache file is replaced with an empty payload and the dirty flag is cleared.
///
/// Details:
/// - Simulates clearing the install list so persistence helper should clean up stale cache content.
- fn flush_deps_cache_removes_when_install_list_empty() {
+ fn flush_deps_cache_writes_empty_when_install_list_empty() {
let mut app = new_app();
let mut path = std::env::temp_dir();
path.push(format!(
@@ -622,7 +827,13 @@ mod tests {
maybe_flush_deps_cache(&mut app);
assert!(!app.deps_cache_dirty);
- assert!(std::fs::metadata(&app.deps_cache_path).is_err());
+ let body = std::fs::read_to_string(&app.deps_cache_path)
+ .expect("Failed to read test deps cache file");
+ let cache: crate::app::deps_cache::DependencyCache =
+ serde_json::from_str(&body).expect("Failed to parse dependency cache");
+ assert!(cache.install_list_signature.is_empty());
+ assert!(cache.dependencies.is_empty());
+ let _ = std::fs::remove_file(&app.deps_cache_path);
}
#[test]
@@ -685,17 +896,17 @@ mod tests {
}
#[test]
- /// What: Ensure `maybe_flush_files_cache` deletes the cache file when the install list is empty.
+ /// What: Ensure `maybe_flush_files_cache` writes an empty cache file when the install list is empty.
///
/// Inputs:
/// - `AppState` with an empty install list, an on-disk cache file, and `files_cache_dirty = true`.
///
/// Output:
- /// - Cache file is removed and the dirty flag resets.
+ /// - Cache file is replaced with an empty payload and the dirty flag resets.
///
/// Details:
/// - Mirrors the behaviour when the user clears the install list to keep disk cache in sync.
- fn flush_files_cache_removes_when_install_list_empty() {
+ fn flush_files_cache_writes_empty_when_install_list_empty() {
let mut app = new_app();
let mut path = std::env::temp_dir();
path.push(format!(
@@ -715,7 +926,93 @@ mod tests {
maybe_flush_files_cache(&mut app);
assert!(!app.files_cache_dirty);
- assert!(std::fs::metadata(&app.files_cache_path).is_err());
+ let body = std::fs::read_to_string(&app.files_cache_path)
+ .expect("Failed to read test files cache file");
+ let cache: crate::app::files_cache::FileCache =
+ serde_json::from_str(&body).expect("Failed to parse file cache");
+ assert!(cache.install_list_signature.is_empty());
+ assert!(cache.files.is_empty());
+ let _ = std::fs::remove_file(&app.files_cache_path);
+ }
+
+ #[test]
+ /// What: Ensure `maybe_flush_services_cache` writes an empty cache file when the install list is empty.
+ ///
+ /// Inputs:
+ /// - `AppState` with an empty install list, an on-disk cache file, and `services_cache_dirty = true`.
+ ///
+ /// Output:
+ /// - Cache file is replaced with an empty payload and the dirty flag resets.
+ ///
+ /// Details:
+ /// - Keeps the cache path present on disk instead of deleting it.
+ fn flush_services_cache_writes_empty_when_install_list_empty() {
+ let mut app = new_app();
+ let mut path = std::env::temp_dir();
+ path.push(format!(
+ "pacsea_services_cache_remove_{}_{}.json",
+ std::process::id(),
+ std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .expect("System time is before UNIX epoch")
+ .as_nanos()
+ ));
+ app.services_cache_path = path.clone();
+ std::fs::write(&app.services_cache_path, "stale")
+ .expect("Failed to write test services cache file");
+ app.services_cache_dirty = true;
+ app.install_list.clear();
+
+ maybe_flush_services_cache(&mut app);
+
+ assert!(!app.services_cache_dirty);
+ let body = std::fs::read_to_string(&app.services_cache_path)
+ .expect("Failed to read test services cache file");
+ let cache: crate::app::services_cache::ServiceCache =
+ serde_json::from_str(&body).expect("Failed to parse service cache");
+ assert!(cache.install_list_signature.is_empty());
+ assert!(cache.services.is_empty());
+ let _ = std::fs::remove_file(&app.services_cache_path);
+ }
+
+ #[test]
+ /// What: Ensure `maybe_flush_sandbox_cache` writes an empty cache file when the install list is empty.
+ ///
+ /// Inputs:
+ /// - `AppState` with an empty install list, an on-disk cache file, and `sandbox_cache_dirty = true`.
+ ///
+ /// Output:
+ /// - Cache file is replaced with an empty payload and the dirty flag resets.
+ ///
+ /// Details:
+ /// - Keeps the cache path present on disk instead of deleting it.
+ fn flush_sandbox_cache_writes_empty_when_install_list_empty() {
+ let mut app = new_app();
+ let mut path = std::env::temp_dir();
+ path.push(format!(
+ "pacsea_sandbox_cache_remove_{}_{}.json",
+ std::process::id(),
+ std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .expect("System time is before UNIX epoch")
+ .as_nanos()
+ ));
+ app.sandbox_cache_path = path.clone();
+ std::fs::write(&app.sandbox_cache_path, "stale")
+ .expect("Failed to write test sandbox cache file");
+ app.sandbox_cache_dirty = true;
+ app.install_list.clear();
+
+ maybe_flush_sandbox_cache(&mut app);
+
+ assert!(!app.sandbox_cache_dirty);
+ let body = std::fs::read_to_string(&app.sandbox_cache_path)
+ .expect("Failed to read test sandbox cache file");
+ let cache: crate::app::sandbox_cache::SandboxCache =
+ serde_json::from_str(&body).expect("Failed to parse sandbox cache");
+ assert!(cache.install_list_signature.is_empty());
+ assert!(cache.sandbox_info.is_empty());
+ let _ = std::fs::remove_file(&app.sandbox_cache_path);
}
#[test]
@@ -751,4 +1048,194 @@ mod tests {
assert!(body.contains("archlinux.org/news"));
let _ = std::fs::remove_file(&app.news_read_path);
}
+
+ #[test]
+ /// What: Ensure `maybe_flush_news_read_ids` persists read IDs and clears the dirty flag.
+ ///
+ /// Inputs:
+ /// - `AppState` providing a temp `news_read_ids_path`, an ID in the set, and `news_read_ids_dirty = true`.
+ ///
+ /// Output:
+ /// - File contains the expected ID and `news_read_ids_dirty` flips to `false`.
+ ///
+ /// Details:
+ /// - Removes the temp artifact to keep tests idempotent across runs.
+ fn flush_news_read_ids_writes_and_clears_flag() {
+ let mut app = new_app();
+ let mut path = std::env::temp_dir();
+ path.push(format!(
+ "pacsea_newsread_ids_{}_{}.json",
+ std::process::id(),
+ std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .expect("System time is before UNIX epoch")
+ .as_nanos()
+ ));
+ app.news_read_ids_path = path.clone();
+ app.news_read_ids.insert("advisory:123".into());
+ app.news_read_ids_dirty = true;
+ maybe_flush_news_read_ids(&mut app);
+ assert!(!app.news_read_ids_dirty);
+ let body = std::fs::read_to_string(&app.news_read_ids_path)
+ .expect("Failed to read test news read ids file");
+ assert!(body.contains("advisory:123"));
+ let _ = std::fs::remove_file(&app.news_read_ids_path);
+ }
+
+ #[test]
+ /// What: Test `maybe_flush_news_read` is no-op when not dirty.
+ ///
+ /// Inputs:
+ /// - `AppState` with `news_read_dirty = false`.
+ ///
+ /// Output:
+ /// - Function returns early without writing or clearing flag.
+ ///
+ /// Details:
+ /// - Verifies dirty flag check prevents unnecessary I/O.
+ fn flush_news_read_noop_when_not_dirty() {
+ let mut app = new_app();
+ let mut path = std::env::temp_dir();
+ path.push(format!(
+ "pacsea_newsread_{}_{}.json",
+ std::process::id(),
+ std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .expect("System time is before UNIX epoch")
+ .as_nanos()
+ ));
+ app.news_read_path = path.clone();
+ app.news_read_urls.insert("https://example.com".into());
+ app.news_read_dirty = false; // Not dirty
+
+ // File should not exist before
+ assert!(!app.news_read_path.exists());
+
+ maybe_flush_news_read(&mut app);
+
+ // File should still not exist (no-op)
+ assert!(!app.news_read_path.exists());
+ assert!(!app.news_read_dirty);
+ }
+
+ #[test]
+ /// What: Test `maybe_flush_news_read_ids` is no-op when not dirty.
+ ///
+ /// Inputs:
+ /// - `AppState` with `news_read_ids_dirty = false`.
+ ///
+ /// Output:
+ /// - Function returns early without writing or clearing flag.
+ ///
+ /// Details:
+ /// - Verifies dirty flag check prevents unnecessary I/O.
+ fn flush_news_read_ids_noop_when_not_dirty() {
+ let mut app = new_app();
+ let mut path = std::env::temp_dir();
+ path.push(format!(
+ "pacsea_newsread_ids_{}_{}.json",
+ std::process::id(),
+ std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .expect("System time is before UNIX epoch")
+ .as_nanos()
+ ));
+ app.news_read_ids_path = path.clone();
+ app.news_read_ids.insert("test-id".into());
+ app.news_read_ids_dirty = false; // Not dirty
+
+ // File should not exist before
+ assert!(!app.news_read_ids_path.exists());
+
+ maybe_flush_news_read_ids(&mut app);
+
+ // File should still not exist (no-op)
+ assert!(!app.news_read_ids_path.exists());
+ assert!(!app.news_read_ids_dirty);
+ }
+
+ #[test]
+ /// What: Test `maybe_flush_news_read` writes valid JSON.
+ ///
+ /// Inputs:
+ /// - `AppState` with multiple URLs in `news_read_urls`.
+ ///
+ /// Output:
+ /// - Written file contains valid JSON array.
+ ///
+ /// Details:
+ /// - Verifies JSON serialization produces parseable output.
+ fn flush_news_read_writes_valid_json() {
+ // Field assignments in tests are acceptable for test setup
+ let mut app = new_app();
+ let mut path = std::env::temp_dir();
+ path.push(format!(
+ "pacsea_newsread_{}_{}.json",
+ std::process::id(),
+ std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .expect("System time is before UNIX epoch")
+ .as_nanos()
+ ));
+ app.news_read_path = path.clone();
+ app.news_read_urls
+ .insert("https://example.com/news/1".into());
+ app.news_read_urls
+ .insert("https://example.com/news/2".into());
+ app.news_read_dirty = true;
+
+ maybe_flush_news_read(&mut app);
+
+ let body = std::fs::read_to_string(&app.news_read_path)
+ .expect("Failed to read test news read file");
+ // Verify it's valid JSON
+ let parsed: std::collections::HashSet<String> =
+ serde_json::from_str(&body).expect("Failed to parse JSON");
+ assert_eq!(parsed.len(), 2);
+ assert!(parsed.contains("https://example.com/news/1"));
+ assert!(parsed.contains("https://example.com/news/2"));
+
+ let _ = std::fs::remove_file(&app.news_read_path);
+ }
+
+ #[test]
+ /// What: Test `maybe_flush_news_read_ids` writes valid JSON.
+ ///
+ /// Inputs:
+ /// - `AppState` with multiple IDs in `news_read_ids`.
+ ///
+ /// Output:
+ /// - Written file contains valid JSON array.
+ ///
+ /// Details:
+ /// - Verifies JSON serialization produces parseable output.
+ fn flush_news_read_ids_writes_valid_json() {
+ let mut app = new_app();
+ let mut path = std::env::temp_dir();
+ path.push(format!(
+ "pacsea_newsread_ids_{}_{}.json",
+ std::process::id(),
+ std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .expect("System time is before UNIX epoch")
+ .as_nanos()
+ ));
+ app.news_read_ids_path = path.clone();
+ app.news_read_ids.insert("id-1".into());
+ app.news_read_ids.insert("id-2".into());
+ app.news_read_ids_dirty = true;
+
+ maybe_flush_news_read_ids(&mut app);
+
+ let body = std::fs::read_to_string(&app.news_read_ids_path)
+ .expect("Failed to read test news read ids file");
+ // Verify it's valid JSON
+ let parsed: std::collections::HashSet<String> =
+ serde_json::from_str(&body).expect("Failed to parse JSON");
+ assert_eq!(parsed.len(), 2);
+ assert!(parsed.contains("id-1"));
+ assert!(parsed.contains("id-2"));
+
+ let _ = std::fs::remove_file(&app.news_read_ids_path);
+ }
}
diff --git a/src/app/recent.rs b/src/app/recent.rs
index ec2e4fb47..f77973835 100644
--- a/src/app/recent.rs
+++ b/src/app/recent.rs
@@ -31,6 +31,48 @@ pub fn maybe_save_recent(app: &mut AppState) {
app.recent_dirty = true;
}
+/// What: Debounced persistence of the current news search input into the news Recent list.
+///
+/// Inputs:
+/// - `app`: Mutable application state providing news search text and timing markers
+///
+/// Output:
+/// - Updates `news_recent` (deduped, clamped to capacity), sets `news_recent_dirty`, and records
+/// last-saved value when conditions are met (non-empty, past debounce window, changed since last save).
+pub fn maybe_save_news_recent(app: &mut AppState) {
+ if !matches!(app.app_mode, crate::state::types::AppMode::News) {
+ return;
+ }
+ let now = Instant::now();
+ let query = app.news_search_input.trim();
+ if query.is_empty() {
+ app.news_history_pending = None;
+ app.news_history_pending_at = None;
+ return;
+ }
+
+ // Track pending value and debounce start
+ app.news_history_pending = Some(query.to_string());
+ app.news_history_pending_at = Some(now);
+
+ // Enforce 2s debounce from the last input change
+ if now.duration_since(app.last_input_change) < Duration::from_secs(2) {
+ return;
+ }
+
+ // Avoid duplicate save of the same value
+ if app.news_history_last_saved.as_deref() == Some(query) {
+ return;
+ }
+
+ let value = query.to_string();
+ let key = value.to_ascii_lowercase();
+ app.news_recent.resize(recent_capacity());
+ app.news_recent.put(key, value.clone());
+ app.news_history_last_saved = Some(value);
+ app.news_recent_dirty = true;
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -142,4 +184,28 @@ mod tests {
.all(|entry| entry != "pkg0" && entry != "pkg1")
);
}
+
+ #[test]
+ /// What: Ensure news recent save is debounced and uses news search input.
+ fn news_recent_respects_debounce_and_changes() {
+ let mut app = new_app();
+ app.app_mode = crate::state::types::AppMode::News;
+ app.news_recent.clear();
+
+ // Under debounce: should not save
+ app.news_search_input = "arch".into();
+ app.last_input_change = Instant::now();
+ maybe_save_news_recent(&mut app);
+ assert!(app.news_recent.is_empty());
+
+ // Beyond debounce: should save
+ app.last_input_change = Instant::now()
+ .checked_sub(Duration::from_secs(3))
+ .unwrap_or_else(Instant::now);
+ maybe_save_news_recent(&mut app);
+ let recents: Vec<String> = app.news_recent.iter().map(|(_, v)| v.clone()).collect();
+ assert_eq!(recents.first().map(String::as_str), Some("arch"));
+ assert_eq!(app.news_history_last_saved.as_deref(), Some("arch"));
+ assert!(app.news_recent_dirty);
+ }
}
diff --git a/src/app/runtime/channels.rs b/src/app/runtime/channels.rs
index d30900413..9dbd8e25a 100644
--- a/src/app/runtime/channels.rs
+++ b/src/app/runtime/channels.rs
@@ -4,9 +4,8 @@ use std::sync::atomic::AtomicBool;
use crossterm::event::Event as CEvent;
use tokio::sync::mpsc;
-use crate::state::{
- ArchStatusColor, NewsItem, PackageDetails, PackageItem, QueryInput, SearchResults,
-};
+use crate::state::types::NewsFeedPayload;
+use crate::state::{ArchStatusColor, PackageDetails, PackageItem, QueryInput, SearchResults};
/// What: Channel definitions for runtime communication.
///
@@ -15,153 +14,298 @@ use crate::state::{
/// between the main event loop and background workers
#[allow(dead_code)]
pub struct Channels {
+ /// Sender for terminal events (keyboard/mouse) from the event reading thread.
pub event_tx: mpsc::UnboundedSender,
+ /// Receiver for terminal events in the main event loop.
pub event_rx: mpsc::UnboundedReceiver,
+ /// Atomic flag to signal cancellation of the event reading thread.
pub event_thread_cancelled: Arc,
+ /// Sender for search results from the search worker.
pub search_result_tx: mpsc::UnboundedSender,
+ /// Receiver for search results in the main event loop.
pub results_rx: mpsc::UnboundedReceiver,
+ /// Sender for package details requests to the details worker.
pub details_req_tx: mpsc::UnboundedSender,
+ /// Sender for package details responses from the details worker.
pub details_res_tx: mpsc::UnboundedSender,
+ /// Receiver for package details responses in the main event loop.
pub details_res_rx: mpsc::UnboundedReceiver,
+ /// Sender for tick events to trigger periodic UI updates.
pub tick_tx: mpsc::UnboundedSender<()>,
+ /// Receiver for tick events in the main event loop.
pub tick_rx: mpsc::UnboundedReceiver<()>,
+ /// Sender for network error messages from background workers.
pub net_err_tx: mpsc::UnboundedSender,
+ /// Receiver for network error messages in the main event loop.
pub net_err_rx: mpsc::UnboundedReceiver,
+ /// Sender for preview requests (package details for Recent pane).
pub preview_tx: mpsc::UnboundedSender,
+ /// Receiver for preview responses in the main event loop.
pub preview_rx: mpsc::UnboundedReceiver,
+ /// Sender for adding packages to the install list.
pub add_tx: mpsc::UnboundedSender,
+ /// Receiver for add requests in the install list handler.
pub add_rx: mpsc::UnboundedReceiver,
+ /// Sender for index update notifications.
pub index_notify_tx: mpsc::UnboundedSender<()>,
+ /// Receiver for index update notifications in the main event loop.
pub index_notify_rx: mpsc::UnboundedReceiver<()>,
+ /// Sender for PKGBUILD content requests.
pub pkgb_req_tx: mpsc::UnboundedSender,
+ /// Sender for PKGBUILD content responses (package name, PKGBUILD content).
pub pkgb_res_tx: mpsc::UnboundedSender<(String, String)>,
+ /// Receiver for PKGBUILD content responses in the main event loop.
pub pkgb_res_rx: mpsc::UnboundedReceiver<(String, String)>,
+ /// Sender for AUR comments requests (package name).
pub comments_req_tx: mpsc::UnboundedSender,
+ /// Sender for AUR comments responses (package name, comments or error).
pub comments_res_tx:
mpsc::UnboundedSender<(String, Result, String>)>,
+ /// Receiver for AUR comments responses in the main event loop.
pub comments_res_rx:
mpsc::UnboundedReceiver<(String, Result, String>)>,
+ /// Sender for Arch Linux status updates (status text, color).
pub status_tx: mpsc::UnboundedSender<(String, ArchStatusColor)>,
+ /// Receiver for Arch Linux status updates in the main event loop.
pub status_rx: mpsc::UnboundedReceiver<(String, ArchStatusColor)>,
- pub news_tx: mpsc::UnboundedSender>,
- pub news_rx: mpsc::UnboundedReceiver>,
+ /// Sender for startup news popup items.
+ pub news_tx: mpsc::UnboundedSender>,
+ /// Receiver for startup news popup items in the main event loop.
+ pub news_rx: mpsc::UnboundedReceiver>,
+ /// Sender for news feed items plus last-seen state.
+ pub news_feed_tx: mpsc::UnboundedSender,
+ /// Receiver for news feed payloads in the main event loop.
+ pub news_feed_rx: mpsc::UnboundedReceiver,
+ /// Sender for incremental news items (background continuation).
+ pub news_incremental_tx: mpsc::UnboundedSender,
+ /// Receiver for incremental news items in the main event loop.
+ pub news_incremental_rx: mpsc::UnboundedReceiver,
+ /// Request channel for fetching news article content (URL).
+ pub news_content_req_tx: mpsc::UnboundedSender,
+ /// Response channel for news article content (URL, content).
+ pub news_content_res_rx: mpsc::UnboundedReceiver<(String, String)>,
+ /// Sender for system updates information (count, package names).
pub updates_tx: mpsc::UnboundedSender<(usize, Vec)>,
+ /// Receiver for system updates information in the main event loop.
pub updates_rx: mpsc::UnboundedReceiver<(usize, Vec)>,
+ /// Sender for remote announcements.
pub announcement_tx: mpsc::UnboundedSender,
+ /// Receiver for remote announcements in the main event loop.
pub announcement_rx: mpsc::UnboundedReceiver,
+ /// Sender for dependency resolution requests (packages, action).
pub deps_req_tx:
mpsc::UnboundedSender<(Vec, crate::state::modal::PreflightAction)>,
+ /// Sender for dependency resolution responses.
pub deps_res_tx: mpsc::UnboundedSender>,
+ /// Receiver for dependency resolution responses in the main event loop.
pub deps_res_rx: mpsc::UnboundedReceiver>,
+ /// Sender for file analysis requests (packages, action).
pub files_req_tx:
mpsc::UnboundedSender<(Vec, crate::state::modal::PreflightAction)>,
+ /// Sender for file analysis responses.
pub files_res_tx: mpsc::UnboundedSender>,
+ /// Receiver for file analysis responses in the main event loop.
pub files_res_rx: mpsc::UnboundedReceiver>,
+ /// Sender for service impact analysis requests (packages, action).
pub services_req_tx:
mpsc::UnboundedSender<(Vec, crate::state::modal::PreflightAction)>,
+ /// Sender for service impact analysis responses.
pub services_res_tx: mpsc::UnboundedSender>,
+ /// Receiver for service impact analysis responses in the main event loop.
pub services_res_rx: mpsc::UnboundedReceiver>,
+ /// Sender for sandbox analysis requests (packages).
pub sandbox_req_tx: mpsc::UnboundedSender>,
+ /// Sender for sandbox analysis responses.
pub sandbox_res_tx: mpsc::UnboundedSender>,
+ /// Receiver for sandbox analysis responses in the main event loop.
pub sandbox_res_rx: mpsc::UnboundedReceiver>,
+ /// Sender for preflight summary requests (packages, action).
pub summary_req_tx:
mpsc::UnboundedSender<(Vec, crate::state::modal::PreflightAction)>,
+ /// Sender for preflight summary responses.
pub summary_res_tx: mpsc::UnboundedSender,
+ /// Receiver for preflight summary responses in the main event loop.
pub summary_res_rx: mpsc::UnboundedReceiver,
+ /// Sender for executor requests (install/remove/downgrade operations).
pub executor_req_tx: mpsc::UnboundedSender,
+ /// Receiver for executor responses in the main event loop.
pub executor_res_rx: mpsc::UnboundedReceiver,
+ /// Sender for post-summary computation requests (packages, success flag).
pub post_summary_req_tx: mpsc::UnboundedSender<(Vec, Option)>,
+ /// Receiver for post-summary computation results in the main event loop.
pub post_summary_res_rx: mpsc::UnboundedReceiver,
+ /// Sender for search queries to the search worker.
pub query_tx: mpsc::UnboundedSender,
}
/// What: Event channel pair and cancellation flag.
struct EventChannels {
+ /// Sender for terminal events.
tx: mpsc::UnboundedSender,
+ /// Receiver for terminal events.
rx: mpsc::UnboundedReceiver,
+ /// Cancellation flag for event thread.
cancelled: Arc,
}
/// What: Search-related channels.
struct SearchChannels {
+ /// Sender for search results.
result_tx: mpsc::UnboundedSender,
+ /// Receiver for search results.
results_rx: mpsc::UnboundedReceiver,
+ /// Sender for search queries.
query_tx: mpsc::UnboundedSender,
+ /// Receiver for search queries.
query_rx: mpsc::UnboundedReceiver,
}
/// What: Package details channels.
struct DetailsChannels {
+ /// Sender for package details requests.
req_tx: mpsc::UnboundedSender,
+ /// Receiver for package details requests.
req_rx: mpsc::UnboundedReceiver,
+ /// Sender for package details responses.
res_tx: mpsc::UnboundedSender,
+ /// Receiver for package details responses.
res_rx: mpsc::UnboundedReceiver,
}
/// What: Preflight-related channels (dependencies, files, services, sandbox, summary).
struct PreflightChannels {
+ /// Sender for dependency resolution requests.
deps_req_tx: mpsc::UnboundedSender<(Vec, crate::state::modal::PreflightAction)>,
+ /// Receiver for dependency resolution requests.
deps_req_rx: mpsc::UnboundedReceiver<(Vec, crate::state::modal::PreflightAction)>,
+ /// Sender for dependency resolution responses.
deps_res_tx: mpsc::UnboundedSender>,
+ /// Receiver for dependency resolution responses.
deps_res_rx: mpsc::UnboundedReceiver>,
+ /// Sender for file analysis requests.
files_req_tx: mpsc::UnboundedSender<(Vec, crate::state::modal::PreflightAction)>,
+ /// Receiver for file analysis requests.
files_req_rx: mpsc::UnboundedReceiver<(Vec, crate::state::modal::PreflightAction)>,
+ /// Sender for file analysis responses.
files_res_tx: mpsc::UnboundedSender>,
+ /// Receiver for file analysis responses.
files_res_rx: mpsc::UnboundedReceiver>,
+ /// Sender for service impact analysis requests.
services_req_tx:
mpsc::UnboundedSender<(Vec, crate::state::modal::PreflightAction)>,
+ /// Receiver for service impact analysis requests.
services_req_rx:
mpsc::UnboundedReceiver<(Vec, crate::state::modal::PreflightAction)>,
+ /// Sender for service impact analysis responses.
services_res_tx: mpsc::UnboundedSender>,
+ /// Receiver for service impact analysis responses.
services_res_rx: mpsc::UnboundedReceiver>,
+ /// Sender for sandbox analysis requests.
sandbox_req_tx: mpsc::UnboundedSender>,
+ /// Receiver for sandbox analysis requests.
sandbox_req_rx: mpsc::UnboundedReceiver>,
+ /// Sender for sandbox analysis responses.
sandbox_res_tx: mpsc::UnboundedSender>,
+ /// Receiver for sandbox analysis responses.
sandbox_res_rx: mpsc::UnboundedReceiver>,
+ /// Sender for preflight summary requests.
summary_req_tx: mpsc::UnboundedSender<(Vec, crate::state::modal::PreflightAction)>,
+ /// Receiver for preflight summary requests.
summary_req_rx:
mpsc::UnboundedReceiver<(Vec, crate::state::modal::PreflightAction)>,
+ /// Sender for preflight summary responses.
summary_res_tx: mpsc::UnboundedSender,
+ /// Receiver for preflight summary responses.
summary_res_rx: mpsc::UnboundedReceiver,
}
/// What: Utility channels (tick, network errors, preview, add, index notify, PKGBUILD, status, news).
struct UtilityChannels {
+ /// Sender for tick events.
tick_tx: mpsc::UnboundedSender<()>,
+ /// Receiver for tick events.
tick_rx: mpsc::UnboundedReceiver<()>,
+ /// Sender for network error messages.
net_err_tx: mpsc::UnboundedSender,
+ /// Receiver for network error messages.
net_err_rx: mpsc::UnboundedReceiver,
+ /// Sender for preview requests.
preview_tx: mpsc::UnboundedSender,
+ /// Receiver for preview requests.
preview_rx: mpsc::UnboundedReceiver,
+ /// Sender for add to install list requests.
add_tx: mpsc::UnboundedSender,
+ /// Receiver for add to install list requests.
add_rx: mpsc::UnboundedReceiver,
+ /// Sender for index update notifications.
index_notify_tx: mpsc::UnboundedSender<()>,
+ /// Receiver for index update notifications.
index_notify_rx: mpsc::UnboundedReceiver<()>,
+ /// Sender for PKGBUILD requests.
pkgb_req_tx: mpsc::UnboundedSender,
+ /// Receiver for PKGBUILD requests.
pkgb_req_rx: mpsc::UnboundedReceiver,
+ /// Sender for PKGBUILD responses.
pkgb_res_tx: mpsc::UnboundedSender<(String, String)>,
+ /// Receiver for PKGBUILD responses.
pkgb_res_rx: mpsc::UnboundedReceiver<(String, String)>,
+ /// Sender for AUR comments requests.
comments_req_tx: mpsc::UnboundedSender,
+ /// Receiver for AUR comments requests.
comments_req_rx: mpsc::UnboundedReceiver,
+ /// Sender for AUR comments responses.
comments_res_tx:
mpsc::UnboundedSender<(String, Result, String>)>,
+ /// Receiver for AUR comments responses.
comments_res_rx:
mpsc::UnboundedReceiver<(String, Result, String>)>,
+ /// Sender for Arch Linux status updates.
status_tx: mpsc::UnboundedSender<(String, ArchStatusColor)>,
+ /// Receiver for Arch Linux status updates.
status_rx: mpsc::UnboundedReceiver<(String, ArchStatusColor)>,
- news_tx: mpsc::UnboundedSender>,
- news_rx: mpsc::UnboundedReceiver>,
+ /// Sender for startup news popup items.
+ news_tx: mpsc::UnboundedSender>,
+ /// Receiver for startup news popup items.
+ news_rx: mpsc::UnboundedReceiver>,
+ /// Sender for news feed payloads.
+ news_feed_tx: mpsc::UnboundedSender,
+ /// Receiver for news feed payloads.
+ news_feed_rx: mpsc::UnboundedReceiver,
+ /// Sender for incremental news items.
+ news_incremental_tx: mpsc::UnboundedSender,
+ /// Receiver for incremental news items.
+ news_incremental_rx: mpsc::UnboundedReceiver,
+ /// Sender for news article content requests.
+ news_content_req_tx: mpsc::UnboundedSender,
+ /// Receiver for news article content requests.
+ news_content_req_rx: mpsc::UnboundedReceiver,
+ /// Sender for news article content responses.
+ news_content_res_tx: mpsc::UnboundedSender<(String, String)>,
+ /// Receiver for news article content responses.
+ news_content_res_rx: mpsc::UnboundedReceiver<(String, String)>,
+ /// Sender for system updates information.
updates_tx: mpsc::UnboundedSender<(usize, Vec)>,
+ /// Receiver for system updates information.
updates_rx: mpsc::UnboundedReceiver<(usize, Vec)>,
+ /// Sender for remote announcements.
announcement_tx: mpsc::UnboundedSender,
+ /// Receiver for remote announcements.
announcement_rx: mpsc::UnboundedReceiver,
+ /// Sender for executor requests.
executor_req_tx: mpsc::UnboundedSender,
+ /// Receiver for executor requests.
executor_req_rx: mpsc::UnboundedReceiver,
+ /// Sender for executor responses.
executor_res_tx: mpsc::UnboundedSender,
+ /// Receiver for executor responses.
executor_res_rx: mpsc::UnboundedReceiver,
+ /// Sender for post-summary computation requests.
post_summary_req_tx: mpsc::UnboundedSender<(Vec, Option)>,
+ /// Receiver for post-summary computation requests.
post_summary_req_rx: mpsc::UnboundedReceiver<(Vec, Option)>,
+ /// Sender for post-summary computation results.
post_summary_res_tx: mpsc::UnboundedSender,
+ /// Receiver for post-summary computation results.
post_summary_res_rx: mpsc::UnboundedReceiver,
}
@@ -269,7 +413,12 @@ fn create_utility_channels() -> UtilityChannels {
let (comments_res_tx, comments_res_rx) =
mpsc::unbounded_channel::<(String, Result, String>)>();
let (status_tx, status_rx) = mpsc::unbounded_channel::<(String, ArchStatusColor)>();
- let (news_tx, news_rx) = mpsc::unbounded_channel::>();
+ let (news_tx, news_rx) = mpsc::unbounded_channel::>();
+ let (news_feed_tx, news_feed_rx) = mpsc::unbounded_channel::();
+ let (news_incremental_tx, news_incremental_rx) =
+ mpsc::unbounded_channel::();
+ let (news_content_req_tx, news_content_req_rx) = mpsc::unbounded_channel::();
+ let (news_content_res_tx, news_content_res_rx) = mpsc::unbounded_channel::<(String, String)>();
let (updates_tx, updates_rx) = mpsc::unbounded_channel::<(usize, Vec)>();
let (announcement_tx, announcement_rx) =
mpsc::unbounded_channel::();
@@ -304,6 +453,14 @@ fn create_utility_channels() -> UtilityChannels {
status_rx,
news_tx,
news_rx,
+ news_feed_tx,
+ news_feed_rx,
+ news_incremental_tx,
+ news_incremental_rx,
+ news_content_req_tx,
+ news_content_req_rx,
+ news_content_res_tx,
+ news_content_res_rx,
updates_tx,
updates_rx,
announcement_tx,
@@ -348,6 +505,10 @@ impl Channels {
utility_channels.comments_req_rx,
utility_channels.comments_res_tx.clone(),
);
+ crate::app::runtime::workers::news_content::spawn_news_content_worker(
+ utility_channels.news_content_req_rx,
+ utility_channels.news_content_res_tx.clone(),
+ );
crate::app::runtime::workers::preflight::spawn_dependency_worker(
preflight_channels.deps_req_rx,
preflight_channels.deps_res_tx.clone(),
@@ -412,6 +573,12 @@ impl Channels {
status_rx: utility_channels.status_rx,
news_tx: utility_channels.news_tx,
news_rx: utility_channels.news_rx,
+ news_feed_tx: utility_channels.news_feed_tx,
+ news_feed_rx: utility_channels.news_feed_rx,
+ news_incremental_tx: utility_channels.news_incremental_tx,
+ news_incremental_rx: utility_channels.news_incremental_rx,
+ news_content_req_tx: utility_channels.news_content_req_tx,
+ news_content_res_rx: utility_channels.news_content_res_rx,
updates_tx: utility_channels.updates_tx,
updates_rx: utility_channels.updates_rx,
announcement_tx: utility_channels.announcement_tx,
diff --git a/src/app/runtime/cleanup.rs b/src/app/runtime/cleanup.rs
index 02189e8ab..918410e51 100644
--- a/src/app/runtime/cleanup.rs
+++ b/src/app/runtime/cleanup.rs
@@ -2,7 +2,9 @@ use crate::state::AppState;
use super::super::persist::{
maybe_flush_announcement_read, maybe_flush_cache, maybe_flush_deps_cache,
- maybe_flush_files_cache, maybe_flush_install, maybe_flush_news_read,
+ maybe_flush_files_cache, maybe_flush_install, maybe_flush_news_bookmarks,
+ maybe_flush_news_content_cache, maybe_flush_news_read, maybe_flush_news_read_ids,
+ maybe_flush_news_recent, maybe_flush_news_seen_aur_comments, maybe_flush_news_seen_versions,
maybe_flush_pkgbuild_parse_cache, maybe_flush_recent, maybe_flush_sandbox_cache,
maybe_flush_services_cache,
};
@@ -58,7 +60,13 @@ pub fn cleanup_on_exit(app: &mut AppState, channels: &Channels) {
maybe_flush_cache(app);
maybe_flush_recent(app);
+ maybe_flush_news_recent(app);
+ maybe_flush_news_bookmarks(app);
+ maybe_flush_news_content_cache(app);
maybe_flush_news_read(app);
+ maybe_flush_news_read_ids(app);
+ maybe_flush_news_seen_versions(app);
+ maybe_flush_news_seen_aur_comments(app);
maybe_flush_announcement_read(app);
maybe_flush_install(app);
maybe_flush_deps_cache(app);
diff --git a/src/app/runtime/event_loop.rs b/src/app/runtime/event_loop.rs
index dc346ba08..f115f6def 100644
--- a/src/app/runtime/event_loop.rs
+++ b/src/app/runtime/event_loop.rs
@@ -2,9 +2,11 @@ use ratatui::Terminal;
use tokio::select;
use crate::i18n;
+use crate::state::types::NewsFeedPayload;
use crate::state::{AppState, PackageItem};
use crate::ui::ui;
use crate::util::parse_update_entry;
+use tracing::info;
use super::background::Channels;
use super::handlers::{
@@ -219,6 +221,153 @@ fn handle_updates_list(app: &mut AppState, count: usize, list: Vec) {
}
}
+/// What: Apply filters and sorting to news feed items.
+///
+/// Inputs:
+/// - `app`: Application state containing news feed data and filter flags.
+/// - `payload`: News feed payload containing items and metadata.
+///
+/// Details:
+/// - Does not clear `news_loading` flag here - it will be cleared when news modal is shown.
+fn handle_news_feed_items(app: &mut AppState, payload: NewsFeedPayload) {
+ tracing::info!(
+ items_count = payload.items.len(),
+ "received aggregated news feed payload in event loop"
+ );
+ app.news_items = payload.items;
+ app.news_seen_pkg_versions = payload.seen_pkg_versions;
+ app.news_seen_pkg_versions_dirty = true;
+ app.news_seen_aur_comments = payload.seen_aur_comments;
+ app.news_seen_aur_comments_dirty = true;
+ match serde_json::to_string_pretty(&app.news_items) {
+ Ok(serialized) => {
+ if let Err(e) = std::fs::write(&app.news_feed_path, serialized) {
+ tracing::warn!(error = %e, path = ?app.news_feed_path, "failed to persist news feed cache");
+ }
+ }
+ Err(e) => tracing::warn!(error = %e, "failed to serialize news feed cache"),
+ }
+ app.refresh_news_results();
+
+ // News feed is now loaded - clear loading flag and toast
+ app.news_loading = false;
+ app.toast_message = None;
+ app.toast_expires_at = None;
+
+ info!(
+ fetched = app.news_items.len(),
+ visible = app.news_results.len(),
+ max_age_days = app.news_max_age_days.map(i64::from),
+ installed_only = app.news_filter_installed_only,
+ arch_on = app.news_filter_show_arch_news,
+ advisories_on = app.news_filter_show_advisories,
+ "news feed updated"
+ );
+ // Check for network errors and show a small toast
+ if crate::sources::take_network_error() {
+ app.toast_message = Some("Network error: some news sources unreachable".to_string());
+ app.toast_expires_at = Some(std::time::Instant::now() + std::time::Duration::from_secs(5));
+ }
+}
+
+/// What: Handle a single incremental news item from background continuation.
+///
+/// Inputs:
+/// - `app`: Application state
+/// - `item`: The news feed item to add
+///
+/// Details:
+/// - Appends the item to `news_items` if not already present (by id).
+/// - Refreshes filtered/sorted results.
+/// - Persists the updated feed cache to disk.
+fn handle_incremental_news_item(app: &mut AppState, item: crate::state::types::NewsFeedItem) {
+ // Check if item already exists (by id)
+ if app.news_items.iter().any(|existing| existing.id == item.id) {
+ tracing::debug!(
+ item_id = %item.id,
+ "incremental news item already exists, skipping"
+ );
+ return;
+ }
+
+ tracing::info!(
+ item_id = %item.id,
+ source = ?item.source,
+ title = %item.title,
+ "received incremental news item"
+ );
+
+ // Add the new item
+ app.news_items.push(item);
+
+ // Refresh filtered/sorted results
+ app.refresh_news_results();
+
+ // Persist to disk
+ if let Ok(serialized) = serde_json::to_string_pretty(&app.news_items)
+ && let Err(e) = std::fs::write(&app.news_feed_path, serialized)
+ {
+ tracing::warn!(error = %e, path = ?app.news_feed_path, "failed to persist incremental news feed cache");
+ }
+}
+
+/// What: Handle news article content response.
+///
+/// Inputs:
+/// - `app`: Application state
+/// - `url`: The URL that was fetched
+/// - `content`: The article content
+fn handle_news_content(app: &mut AppState, url: &str, content: String) {
+ // Only cache successful content, not error messages
+ // Error messages start with "Failed to load content:" and should not be persisted
+ let is_error = content.starts_with("Failed to load content:");
+ if is_error {
+ tracing::debug!(
+ url,
+ "news_content: not caching error response to allow retry"
+ );
+ } else {
+ app.news_content_cache
+ .insert(url.to_string(), content.clone());
+ app.news_content_cache_dirty = true;
+ }
+
+ // Update displayed content if this is for the currently selected item
+ if let Some(selected_url) = app
+ .news_results
+ .get(app.news_selected)
+ .and_then(|selected| selected.url.as_deref())
+ && selected_url == url
+ {
+ tracing::debug!(
+ url,
+ len = content.len(),
+ selected = app.news_selected,
+ "news_content: response matches selection"
+ );
+ app.news_content_loading = false;
+ app.news_content = if content.is_empty() {
+ None
+ } else {
+ Some(content)
+ };
+ } else {
+ // Clear loading flag even if selection changed; a new request will be issued on next tick.
+ tracing::debug!(
+ url,
+ len = content.len(),
+ selected = app.news_selected,
+ selected_url = ?app
+ .news_results
+ .get(app.news_selected)
+ .and_then(|selected| selected.url.as_deref()),
+ "news_content: response does not match current selection"
+ );
+ app.news_content_loading = false;
+ }
+ app.news_content_loading_since = None;
+}
+
/// What: Process one iteration of channel message handling.
///
/// Inputs:
@@ -231,6 +380,7 @@ fn handle_updates_list(app: &mut AppState, count: usize, list: Vec) {
/// - Waits for and processes a single message from any channel
/// - Returns `true` when an event handler indicates exit (e.g., quit command)
/// - Uses select! to wait on multiple channels concurrently
+#[allow(clippy::cognitive_complexity)]
async fn process_channel_messages(app: &mut AppState, channels: &mut Channels) -> bool {
select! {
Some(ev) = channels.event_rx.recv() => {
@@ -297,6 +447,18 @@ async fn process_channel_messages(app: &mut AppState, channels: &mut Channels) -
handle_comments_result(app, pkgname, result, &channels.tick_tx);
false
}
+ Some(feed) = channels.news_feed_rx.recv() => {
+ handle_news_feed_items(app, feed);
+ false
+ }
+ Some(item) = channels.news_incremental_rx.recv() => {
+ handle_incremental_news_item(app, item);
+ false
+ }
+ Some((url, content)) = channels.news_content_res_rx.recv() => {
+ handle_news_content(app, &url, content);
+ false
+ }
Some(msg) = channels.net_err_rx.recv() => {
tracing::warn!(error = %msg, "Network error received");
#[cfg(not(windows))]
@@ -323,11 +485,22 @@ async fn process_channel_messages(app: &mut AppState, channels: &mut Channels) -
&channels.updates_tx,
&channels.executor_req_tx,
&channels.post_summary_req_tx,
+ &channels.news_content_req_tx,
);
false
}
- Some(todays) = channels.news_rx.recv() => {
- handle_news(app, &todays);
+ Some(items) = channels.news_rx.recv() => {
+ tracing::info!(
+ items_count = items.len(),
+ news_loading_before = app.news_loading,
+ "received news items from channel"
+ );
+ handle_news(app, &items);
+ tracing::info!(
+ news_loading_after = app.news_loading,
+ modal = ?app.modal,
+ "handle_news completed"
+ );
false
}
Some(announcement) = channels.announcement_rx.recv() => {
@@ -502,7 +675,7 @@ fn handle_downgrade_success(app: &mut AppState, items: &[crate::state::PackageIt
/// - Processes `Line`, `ReplaceLastLine`, `Finished`, and `Error` outputs
/// - Handles success/failure cases for Install, Remove, and Downgrade actions
/// - Shows confirmation popup for AUR update when pacman fails
-#[allow(clippy::too_many_lines)] // Function handles multiple executor output types and modal transitions
+#[allow(clippy::too_many_lines)] // Function handles multiple executor output types and modal transitions (function has 187 lines)
fn handle_executor_output(app: &mut AppState, output: crate::install::ExecutorOutput) {
// Log what we received (at trace level to avoid spam)
match &output {
@@ -691,6 +864,236 @@ fn handle_executor_output(app: &mut AppState, output: crate::install::ExecutorOu
}
}
+/// What: Trigger startup news fetch using current startup news settings.
+///
+/// Inputs:
+/// - `channels`: Communication channels for background workers
+/// - `app`: Application state for read sets
+///
+/// Output: None
+///
+/// Details:
+/// - Fetches news feed using startup news settings and sends to `news_tx` channel
+/// - Called when `trigger_startup_news_fetch` flag is set after `NewsSetup` completion
+/// - Sets `news_loading` flag to show loading modal
+fn trigger_startup_news_fetch(channels: &Channels, app: &mut AppState) {
+ use crate::sources;
+ use crate::state::types::NewsSortMode;
+ use std::collections::HashSet;
+
+ let prefs = crate::theme::settings();
+ if !prefs.startup_news_configured {
+ return;
+ }
+
+ // Set loading flag to show loading modal
+ app.news_loading = true;
+ tracing::info!("news_loading set to true, triggering startup news fetch");
+
+ let news_tx = channels.news_tx.clone();
+ let read_urls = app.news_read_urls.clone();
+ let read_ids = app.news_read_ids.clone();
+ let installed: HashSet<String> = crate::index::explicit_names().into_iter().collect();
+ // Create mutable copies for the fetch (won't be persisted, but needed for API)
+ let mut seen_versions = app.news_seen_pkg_versions.clone();
+ let mut seen_aur_comments = app.news_seen_aur_comments.clone();
+
+ tokio::spawn(async move {
+ tracing::info!("on-demand startup news fetch task started");
+ let mut installed_set = installed;
+ if installed_set.is_empty() {
+ crate::index::refresh_installed_cache().await;
+ crate::index::refresh_explicit_cache(crate::state::InstalledPackagesMode::AllExplicit)
+ .await;
+ let refreshed: HashSet<String> = crate::index::explicit_names().into_iter().collect();
+ if !refreshed.is_empty() {
+ installed_set = refreshed;
+ }
+ }
+ let include_pkg_updates =
+ prefs.startup_news_show_pkg_updates || prefs.startup_news_show_aur_updates;
+ // Use lower limit for startup popup (20) vs main feed (50)
+ // If both official and AUR updates are requested, double the limit so both types can be included
+ #[allow(clippy::items_after_statements)]
+ const STARTUP_NEWS_LIMIT: usize = 20;
+ let updates_limit =
+ if prefs.startup_news_show_pkg_updates && prefs.startup_news_show_aur_updates {
+ STARTUP_NEWS_LIMIT * 2
+ } else {
+ STARTUP_NEWS_LIMIT
+ };
+ let ctx = sources::NewsFeedContext {
+ force_emit_all: true,
+ updates_list_path: Some(crate::theme::lists_dir().join("available_updates.txt")),
+ limit: updates_limit,
+ include_arch_news: prefs.startup_news_show_arch_news,
+ include_advisories: prefs.startup_news_show_advisories,
+ include_pkg_updates,
+ include_aur_comments: prefs.startup_news_show_aur_comments,
+ installed_filter: Some(&installed_set),
+ installed_only: false,
+ sort_mode: NewsSortMode::DateDesc,
+ seen_pkg_versions: &mut seen_versions,
+ seen_aur_comments: &mut seen_aur_comments,
+ max_age_days: prefs.startup_news_max_age_days,
+ };
+ tracing::info!(
+ limit = updates_limit,
+ include_arch_news = prefs.startup_news_show_arch_news,
+ include_advisories = prefs.startup_news_show_advisories,
+ include_pkg_updates,
+ include_aur_comments = prefs.startup_news_show_aur_comments,
+ max_age_days = ?prefs.startup_news_max_age_days,
+ installed_count = installed_set.len(),
+ "starting on-demand startup news fetch"
+ );
+ match sources::fetch_news_feed(ctx).await {
+ Ok(feed) => {
+ tracing::info!(
+ total_items = feed.len(),
+ "on-demand startup news fetch completed successfully"
+ );
+ // Filter by source type for package updates (AUR vs official are mixed in fetch_installed_updates)
+ let source_filtered: Vec<crate::state::types::NewsFeedItem> = feed
+ .into_iter()
+ .filter(|item| match item.source {
+ crate::state::types::NewsFeedSource::ArchNews => {
+ prefs.startup_news_show_arch_news
+ }
+ crate::state::types::NewsFeedSource::SecurityAdvisory => {
+ prefs.startup_news_show_advisories
+ }
+ crate::state::types::NewsFeedSource::InstalledPackageUpdate => {
+ prefs.startup_news_show_pkg_updates
+ }
+ crate::state::types::NewsFeedSource::AurPackageUpdate => {
+ prefs.startup_news_show_aur_updates
+ }
+ crate::state::types::NewsFeedSource::AurComment => {
+ prefs.startup_news_show_aur_comments
+ }
+ })
+ .collect();
+ // Filter by max age days
+ let filtered: Vec<crate::state::types::NewsFeedItem> =
+ if let Some(max_days) = prefs.startup_news_max_age_days {
+ let cutoff_date = chrono::Utc::now()
+ .checked_sub_signed(chrono::Duration::days(i64::from(max_days)))
+ .map(|dt| dt.format("%Y-%m-%d").to_string());
+ #[allow(clippy::unnecessary_map_or)]
+ let filtered_items = source_filtered
+ .into_iter()
+ .filter(|item| {
+ cutoff_date
+ .as_ref()
+ .map_or(true, |cutoff| &item.date >= cutoff)
+ })
+ .collect();
+ filtered_items
+ } else {
+ source_filtered
+ };
+ // Filter out already-read items
+ #[allow(clippy::unnecessary_map_or)]
+ let unread: Vec<crate::state::types::NewsFeedItem> = filtered
+ .into_iter()
+ .filter(|item| {
+ !read_ids.contains(&item.id)
+ && item.url.as_ref().is_none_or(|url| !read_urls.contains(url))
+ })
+ .collect();
+ tracing::info!(
+ unread_count = unread.len(),
+ "sending on-demand startup news items to channel"
+ );
+ match news_tx.send(unread) {
+ Ok(()) => {
+ tracing::info!("on-demand startup news items sent to channel successfully");
+ }
+ Err(e) => {
+ tracing::error!(
+ error = %e,
+ "failed to send on-demand startup news items to channel (receiver dropped?)"
+ );
+ }
+ }
+ }
+ Err(e) => {
+ tracing::warn!(error = %e, "on-demand startup news fetch failed");
+ tracing::info!("sending empty array to clear loading flag after fetch error");
+ let _ = news_tx.send(Vec::new());
+ }
+ }
+ });
+}
+
+#[cfg(test)]
+mod startup_news_tests {
+ use crate::state::types::{NewsFeedItem, NewsFeedSource};
+ use std::collections::HashSet;
+
+ #[test]
+ /// What: Test filtering logic for already-read news items.
+ ///
+ /// Inputs:
+ /// - News items with some marked as read (by ID and URL).
+ ///
+ /// Output:
+ /// - Only unread items returned.
+ ///
+ /// Details:
+ /// - Verifies read filtering excludes items by both ID and URL.
+ fn test_filter_already_read_items() {
+ let read_ids: HashSet<String> = HashSet::from(["id-1".to_string()]);
+
+ let read_urls: HashSet<String> = HashSet::from(["https://example.com/news/2".to_string()]);
+
+ let items = vec![
+ NewsFeedItem {
+ id: "id-1".to_string(),
+ date: "2025-01-01".to_string(),
+ title: "Item 1".to_string(),
+ summary: None,
+ url: Some("https://example.com/news/1".to_string()),
+ source: NewsFeedSource::ArchNews,
+ severity: None,
+ packages: Vec::new(),
+ },
+ NewsFeedItem {
+ id: "id-2".to_string(),
+ date: "2025-01-02".to_string(),
+ title: "Item 2".to_string(),
+ summary: None,
+ url: Some("https://example.com/news/2".to_string()),
+ source: NewsFeedSource::ArchNews,
+ severity: None,
+ packages: Vec::new(),
+ },
+ NewsFeedItem {
+ id: "id-3".to_string(),
+ date: "2025-01-03".to_string(),
+ title: "Item 3".to_string(),
+ summary: None,
+ url: Some("https://example.com/news/3".to_string()),
+ source: NewsFeedSource::ArchNews,
+ severity: None,
+ packages: Vec::new(),
+ },
+ ];
+
+ let unread: Vec<NewsFeedItem> = items
+ .into_iter()
+ .filter(|item| {
+ !read_ids.contains(&item.id)
+ && item.url.as_ref().is_none_or(|url| !read_urls.contains(url))
+ })
+ .collect();
+
+ assert_eq!(unread.len(), 1);
+ assert_eq!(unread[0].id, "id-3");
+ }
+}
+
/// What: Run the main event loop, processing all channel messages and rendering the UI.
///
/// Inputs:
@@ -704,12 +1107,19 @@ fn handle_executor_output(app: &mut AppState, output: crate::install::ExecutorOu
/// - Renders UI frames and handles all channel messages (events, search results, details,
/// preflight data, PKGBUILD, news, status, etc.)
/// - Exits when event handler returns true (e.g., quit command)
+/// - Checks for `trigger_startup_news_fetch` flag and triggers fetch if set
pub async fn run_event_loop(
terminal: &mut Option>>,
app: &mut AppState,
channels: &mut Channels,
) {
loop {
+ // Check if we need to trigger startup news fetch
+ if app.trigger_startup_news_fetch {
+ app.trigger_startup_news_fetch = false;
+ trigger_startup_news_fetch(channels, &mut *app);
+ }
+
if let Some(t) = terminal.as_mut() {
let _ = t.draw(|f| ui(f, app));
}
@@ -719,3 +1129,90 @@ pub async fn run_event_loop(
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::handle_news_content;
+ use crate::state::AppState;
+ use crate::state::types::{NewsFeedItem, NewsFeedSource};
+
+ /// What: Build a minimal `NewsFeedItem` for news content tests.
+ ///
+ /// Inputs:
+ /// - `id`: Stable identifier for the item.
+ /// - `url`: URL to associate with the item.
+ ///
+ /// Output:
+ /// - `NewsFeedItem` with Arch news source and empty optional fields.
+ ///
+ /// Details:
+ /// - Uses a fixed date to keep assertions deterministic.
+ fn make_news_item(id: &str, url: &str) -> NewsFeedItem {
+ NewsFeedItem {
+ id: id.to_string(),
+ date: "2024-01-01".to_string(),
+ title: format!("Title {id}"),
+ summary: None,
+ url: Some(url.to_string()),
+ source: NewsFeedSource::ArchNews,
+ severity: None,
+ packages: Vec::new(),
+ }
+ }
+
+ #[test]
+ /// What: Ensure stale news content responses are cached but do not overwrite the current selection.
+ ///
+ /// Inputs:
+ /// - App with selection on item `b` and loading flagged true.
+ /// - Content response for outdated item `a`.
+ ///
+ /// Output:
+ /// - `news_content_loading` is cleared and displayed content stays `None`.
+ ///
+ /// Details:
+ /// - Matches `handle_news_content`, which clears the loading flag even on mismatch so a new request is issued on the next tick.
+ fn handle_news_content_clears_loading_for_mismatched_url() {
+ let mut app = AppState {
+ news_results: vec![
+ make_news_item("a", "https://example.com/a"),
+ make_news_item("b", "https://example.com/b"),
+ ],
+ news_selected: 1,
+ news_content_loading: true,
+ ..AppState::default()
+ };
+
+ handle_news_content(&mut app, "https://example.com/a", "old".to_string());
+
+ assert!(!app.news_content_loading);
+ assert!(app.news_content.is_none());
+ assert!(app.news_content_cache.contains_key("https://example.com/a"));
+ }
+
+ #[test]
+ /// What: Ensure news content responses for the selected item clear loading and set content.
+ ///
+ /// Inputs:
+ /// - App with selection on item `a` and loading flagged true.
+ /// - Content response for the same item.
+ ///
+ /// Output:
+ /// - Loading flag clears and content is stored.
+ ///
+ /// Details:
+ /// - Confirms the happy path still updates UI state correctly.
+ fn handle_news_content_updates_current_selection() {
+ let mut app = AppState {
+ news_results: vec![make_news_item("a", "https://example.com/a")],
+ news_content_loading: true,
+ ..AppState::default()
+ };
+
+ handle_news_content(&mut app, "https://example.com/a", "payload".to_string());
+
+ assert!(!app.news_content_loading);
+ assert_eq!(app.news_content, Some("payload".to_string()));
+ assert!(app.news_content_cache.contains_key("https://example.com/a"));
+ }
+}
diff --git a/src/app/runtime/handlers/mod.rs b/src/app/runtime/handlers/mod.rs
index 28c6617c3..e148767f5 100644
--- a/src/app/runtime/handlers/mod.rs
+++ b/src/app/runtime/handlers/mod.rs
@@ -1,9 +1,15 @@
+/// Common utilities for channel handlers.
mod common;
+/// Handler for file analysis results.
pub mod files;
+/// Handler for install list and dependency results.
pub mod install;
+/// Handler for sandbox analysis results.
pub mod sandbox;
+/// Handler for search results and details updates.
pub mod search;
+/// Handler for service impact analysis results.
pub mod services;
pub use files::handle_file_result;
diff --git a/src/app/runtime/init.rs b/src/app/runtime/init.rs
index c192332ab..fca67b041 100644
--- a/src/app/runtime/init.rs
+++ b/src/app/runtime/init.rs
@@ -1,5 +1,4 @@
-use std::collections::HashMap;
-use std::time::Instant;
+use std::{collections::HashMap, fs, path::Path, time::Instant};
use crate::index as pkgindex;
use crate::state::{AppState, PackageDetails, PackageItem};
@@ -138,9 +137,13 @@ pub fn initialize_locale_system(
/// - Checks for GNOME terminal if on GNOME desktop
#[allow(clippy::struct_excessive_bools)]
pub struct InitFlags {
+ /// Whether dependency resolution is needed (cache missing or invalid).
pub needs_deps_resolution: bool,
+ /// Whether file analysis is needed (cache missing or invalid).
pub needs_files_resolution: bool,
+ /// Whether service analysis is needed (cache missing or invalid).
pub needs_services_resolution: bool,
+ /// Whether sandbox analysis is needed (cache missing or invalid).
pub needs_sandbox_resolution: bool,
}
@@ -183,6 +186,79 @@ fn load_cache_with_signature(
)
}
+/// What: Ensure cache directories exist before writing placeholder files.
+///
+/// Inputs:
+/// - `path`: Target cache file path whose parent directory should exist.
+///
+/// Output:
+/// - Parent directory is created if missing; logs a warning on failure.
+///
+/// Details:
+/// - No-op when the path has no parent.
+fn ensure_cache_parent_dir(path: &Path) {
+ if let Some(parent) = path.parent()
+ && let Err(error) = fs::create_dir_all(parent)
+ {
+ tracing::warn!(
+ path = %parent.display(),
+ %error,
+ "[Init] Failed to create cache directory"
+ );
+ }
+}
+
+/// What: Create empty cache files at startup so they always exist on disk.
+///
+/// Inputs:
+/// - `app`: Application state providing cache paths.
+///
+/// Output:
+/// - Writes empty dependency, file, service, and sandbox caches if the files are missing.
+///
+/// Details:
+/// - Uses empty signatures and payloads; leaves existing files untouched.
+/// - Ensures parent directories exist before writing.
+fn initialize_cache_files(app: &AppState) {
+ let empty_signature: Vec<String> = Vec::new();
+
+ if !app.deps_cache_path.exists() {
+ ensure_cache_parent_dir(&app.deps_cache_path);
+ deps_cache::save_cache(&app.deps_cache_path, &empty_signature, &[]);
+ tracing::debug!(
+ path = %app.deps_cache_path.display(),
+ "[Init] Created empty dependency cache"
+ );
+ }
+
+ if !app.files_cache_path.exists() {
+ ensure_cache_parent_dir(&app.files_cache_path);
+ files_cache::save_cache(&app.files_cache_path, &empty_signature, &[]);
+ tracing::debug!(
+ path = %app.files_cache_path.display(),
+ "[Init] Created empty file cache"
+ );
+ }
+
+ if !app.services_cache_path.exists() {
+ ensure_cache_parent_dir(&app.services_cache_path);
+ services_cache::save_cache(&app.services_cache_path, &empty_signature, &[]);
+ tracing::debug!(
+ path = %app.services_cache_path.display(),
+ "[Init] Created empty service cache"
+ );
+ }
+
+ if !app.sandbox_cache_path.exists() {
+ ensure_cache_parent_dir(&app.sandbox_cache_path);
+ sandbox_cache::save_cache(&app.sandbox_cache_path, &empty_signature, &[]);
+ tracing::debug!(
+ path = %app.sandbox_cache_path.display(),
+ "[Init] Created empty sandbox cache"
+ );
+ }
+}
+
/// What: Apply settings from configuration to application state.
///
/// Inputs:
@@ -206,6 +282,20 @@ pub fn apply_settings_to_app_state(app: &mut AppState, prefs: &crate::theme::Set
app.search_normal_mode = prefs.search_startup_mode;
app.fuzzy_search_enabled = prefs.fuzzy_search;
app.installed_packages_mode = prefs.installed_packages_mode;
+ app.app_mode = if prefs.start_in_news {
+ crate::state::types::AppMode::News
+ } else {
+ crate::state::types::AppMode::Package
+ };
+ app.news_filter_show_arch_news = prefs.news_filter_show_arch_news;
+ app.news_filter_show_advisories = prefs.news_filter_show_advisories;
+ app.news_filter_show_pkg_updates = prefs.news_filter_show_pkg_updates;
+ app.news_filter_show_aur_updates = prefs.news_filter_show_aur_updates;
+ app.news_filter_show_aur_comments = prefs.news_filter_show_aur_comments;
+ app.news_filter_installed_only = prefs.news_filter_installed_only;
+ app.news_max_age_days = prefs.news_max_age_days;
+ // Recompute news results with loaded filters/age
+ app.refresh_news_results();
}
/// What: Check if GNOME terminal is needed and set modal if required.
@@ -334,6 +424,39 @@ fn load_news_read_urls(app: &mut AppState) {
}
}
+/// What: Load news read IDs from disk (feed-level tracking).
+///
+/// Inputs:
+/// - `app`: Application state to update
+///
+/// Output: None (modifies app state in place)
+///
+/// Details:
+/// - Attempts to deserialize news read IDs set from JSON file.
+/// - If no IDs file is found, falls back to populated `news_read_urls` for migration.
+fn load_news_read_ids(app: &mut AppState) {
+ if let Ok(s) = std::fs::read_to_string(&app.news_read_ids_path)
+ && let Ok(set) = serde_json::from_str::<std::collections::HashSet<String>>(&s)
+ {
+ app.news_read_ids = set;
+ tracing::info!(
+ path = %app.news_read_ids_path.display(),
+ count = app.news_read_ids.len(),
+ "loaded read news ids"
+ );
+ return;
+ }
+
+ if app.news_read_ids.is_empty() && !app.news_read_urls.is_empty() {
+ app.news_read_ids.extend(app.news_read_urls.iter().cloned());
+ tracing::info!(
+ copied = app.news_read_ids.len(),
+ "seeded news read ids from legacy URL set"
+ );
+ app.news_read_ids_dirty = true;
+ }
+}
+
/// What: Load announcement read IDs from disk.
///
/// Inputs:
@@ -346,8 +469,16 @@ fn load_news_read_urls(app: &mut AppState) {
/// - Handles both old format (single hash) and new format (set of IDs) for migration
fn load_announcement_state(app: &mut AppState) {
// Try old format for migration ({ "hash": "..." })
+ /// What: Legacy announcement read state structure.
+ ///
+ /// Inputs: Deserialized from old announcement read file.
+ ///
+ /// Output: Old state structure for migration.
+ ///
+ /// Details: Used for migrating from old announcement read state format.
#[derive(serde::Deserialize)]
struct OldAnnouncementReadState {
+ /// Announcement hash if read.
hash: Option,
}
if let Ok(s) = std::fs::read_to_string(&app.announcement_read_path) {
@@ -430,6 +561,23 @@ fn check_version_announcement(app: &mut AppState) {
// and will be shown when embedded is dismissed via show_next_pending_announcement()
}
+/// What: Initialize application state by loading settings, caches, and persisted data.
+///
+/// Inputs:
+/// - `app`: Mutable application state to initialize
+/// - `dry_run_flag`: Whether to enable dry-run mode for this session
+/// - `headless`: Whether running in headless/test mode
+///
+/// Output:
+/// - Returns `InitFlags` indicating which caches need background resolution
+///
+/// Details:
+/// - Loads and migrates configuration files
+/// - Initializes locale system and translations
+/// - Loads persisted data: recent searches, install list, details cache, dependency/file/service/sandbox caches
+/// - Loads news read URLs and announcement state
+/// - Loads official package index from disk
+/// - Checks for version-embedded announcements
pub fn initialize_app_state(app: &mut AppState, dry_run_flag: bool, headless: bool) -> InitFlags {
app.dry_run = if dry_run_flag {
true
@@ -445,6 +593,7 @@ pub fn initialize_app_state(app: &mut AppState, dry_run_flag: bool, headless: bo
details_cache = %app.cache_path.display(),
index = %app.official_index_path.display(),
news_read = %app.news_read_path.display(),
+ news_read_ids = %app.news_read_ids_path.display(),
announcement_read = %app.announcement_read_path.display(),
"resolved state file paths"
);
@@ -461,6 +610,28 @@ pub fn initialize_app_state(app: &mut AppState, dry_run_flag: bool, headless: bo
check_gnome_terminal(app, headless);
+ // Show NewsSetup modal on first launch if not configured
+ if !headless && !prefs.startup_news_configured {
+ // Only show if no other modal is already set (e.g., GnomeTerminalPrompt)
+ if matches!(app.modal, crate::state::Modal::None) {
+ app.modal = crate::state::Modal::NewsSetup {
+ show_arch_news: prefs.startup_news_show_arch_news,
+ show_advisories: prefs.startup_news_show_advisories,
+ show_aur_updates: prefs.startup_news_show_aur_updates,
+ show_aur_comments: prefs.startup_news_show_aur_comments,
+ show_pkg_updates: prefs.startup_news_show_pkg_updates,
+ max_age_days: prefs.startup_news_max_age_days,
+ cursor: 0,
+ };
+ }
+ } else if !headless && prefs.startup_news_configured {
+ // Always fetch fresh news in background (using last startup timestamp for incremental updates)
+ // Show loading toast while fetching, but cached items will be displayed immediately
+ app.news_loading = true;
+ app.toast_message = Some(crate::i18n::t(app, "app.news_button.loading"));
+ app.toast_expires_at = None; // No expiration - toast stays until news loading completes
+ }
+
// Check faillock status at startup
if !headless {
let username = std::env::var("USER").unwrap_or_else(|_| "user".to_string());
@@ -474,6 +645,7 @@ pub fn initialize_app_state(app: &mut AppState, dry_run_flag: bool, headless: bo
load_details_cache(app);
load_recent_searches(app);
load_install_list(app);
+ initialize_cache_files(app);
// Load dependency cache after install list is loaded (but before channels are created)
let (deps_cache, needs_deps_resolution) = load_cache_with_signature(
@@ -544,6 +716,7 @@ pub fn initialize_app_state(app: &mut AppState, dry_run_flag: bool, headless: bo
}
load_news_read_urls(app);
+ load_news_read_ids(app);
load_announcement_state(app);
pkgindex::load_from_disk(&app.official_index_path);
@@ -719,6 +892,82 @@ mod tests {
// (KeyMap has many fields, we just verify it's been set)
}
+ #[test]
+ /// What: Verify that `initialize_cache_files` creates placeholder cache files when missing.
+ ///
+ /// Inputs:
+ /// - `AppState` with cache paths pointed to temporary locations that do not yet exist.
+ ///
+ /// Output:
+ /// - Empty dependency, file, service, and sandbox cache files are created.
+ ///
+ /// Details:
+ /// - Validates that startup eagerly materializes cache files instead of delaying until first use.
+ fn initialize_cache_files_creates_empty_placeholders() {
+ let mut app = new_app();
+ let mut deps_path = std::env::temp_dir();
+ deps_path.push(format!(
+ "pacsea_init_deps_cache_{}_{}.json",
+ std::process::id(),
+ std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .expect("System time is before UNIX epoch")
+ .as_nanos()
+ ));
+ let mut files_path = deps_path.clone();
+ files_path.set_file_name("pacsea_init_files_cache.json");
+ let mut services_path = deps_path.clone();
+ services_path.set_file_name("pacsea_init_services_cache.json");
+ let mut sandbox_path = deps_path.clone();
+ sandbox_path.set_file_name("pacsea_init_sandbox_cache.json");
+
+ app.deps_cache_path = deps_path.clone();
+ app.files_cache_path = files_path.clone();
+ app.services_cache_path = services_path.clone();
+ app.sandbox_cache_path = sandbox_path.clone();
+
+ // Ensure paths are clean
+ let _ = std::fs::remove_file(&app.deps_cache_path);
+ let _ = std::fs::remove_file(&app.files_cache_path);
+ let _ = std::fs::remove_file(&app.services_cache_path);
+ let _ = std::fs::remove_file(&app.sandbox_cache_path);
+
+ initialize_cache_files(&app);
+
+ let deps_body = std::fs::read_to_string(&app.deps_cache_path)
+ .expect("Dependency cache file should exist");
+ let deps_cache: crate::app::deps_cache::DependencyCache =
+ serde_json::from_str(&deps_body).expect("Dependency cache should parse");
+ assert!(deps_cache.install_list_signature.is_empty());
+ assert!(deps_cache.dependencies.is_empty());
+
+ let files_body =
+ std::fs::read_to_string(&app.files_cache_path).expect("File cache file should exist");
+ let files_cache: crate::app::files_cache::FileCache =
+ serde_json::from_str(&files_body).expect("File cache should parse");
+ assert!(files_cache.install_list_signature.is_empty());
+ assert!(files_cache.files.is_empty());
+
+ let services_body = std::fs::read_to_string(&app.services_cache_path)
+ .expect("Service cache file should exist");
+ let services_cache: crate::app::services_cache::ServiceCache =
+ serde_json::from_str(&services_body).expect("Service cache should parse");
+ assert!(services_cache.install_list_signature.is_empty());
+ assert!(services_cache.services.is_empty());
+
+ let sandbox_body = std::fs::read_to_string(&app.sandbox_cache_path)
+ .expect("Sandbox cache file should exist");
+ let sandbox_cache: crate::app::sandbox_cache::SandboxCache =
+ serde_json::from_str(&sandbox_body).expect("Sandbox cache should parse");
+ assert!(sandbox_cache.install_list_signature.is_empty());
+ assert!(sandbox_cache.sandbox_info.is_empty());
+
+ let _ = std::fs::remove_file(&app.deps_cache_path);
+ let _ = std::fs::remove_file(&app.files_cache_path);
+ let _ = std::fs::remove_file(&app.services_cache_path);
+ let _ = std::fs::remove_file(&app.sandbox_cache_path);
+ }
+
#[tokio::test]
/// What: Verify that `trigger_initial_resolutions` skips when install list is empty.
///
diff --git a/src/app/runtime/mod.rs b/src/app/runtime/mod.rs
index b07014cd5..310cc8242 100644
--- a/src/app/runtime/mod.rs
+++ b/src/app/runtime/mod.rs
@@ -5,13 +5,21 @@ use crate::state::AppState;
use super::terminal::{restore_terminal, setup_terminal};
+/// Background worker management and spawning.
mod background;
+/// Channel definitions for runtime communication.
mod channels;
+/// Cleanup operations on application exit.
mod cleanup;
+/// Main event loop implementation.
mod event_loop;
+/// Event handlers for different event types.
mod handlers;
+/// Application state initialization module.
pub mod init;
+/// Tick handler for periodic UI updates.
mod tick_handler;
+/// Background worker implementations.
mod workers;
use background::{Channels, spawn_auxiliary_workers, spawn_event_thread};
@@ -19,6 +27,7 @@ use cleanup::cleanup_on_exit;
use event_loop::run_event_loop;
use init::{initialize_app_state, trigger_initial_resolutions};
+/// Result type alias for runtime operations.
type Result = std::result::Result>;
/// What: Run the Pacsea TUI application end-to-end.
@@ -67,17 +76,22 @@ pub async fn run(dry_run_flag: bool) -> Result<()> {
// Create channels and spawn background workers
let mut channels = Channels::new(app.official_index_path.clone());
- // Get updates refresh interval from settings
- let updates_refresh_interval = crate::theme::settings().updates_refresh_interval;
+ // Get updates refresh interval from settings (minimum 60s per requirement)
+ let updates_refresh_interval = crate::theme::settings().updates_refresh_interval.max(60);
// Spawn auxiliary workers (status, news, tick, index updates)
spawn_auxiliary_workers(
headless,
&channels.status_tx,
&channels.news_tx,
+ &channels.news_feed_tx,
+ &channels.news_incremental_tx,
&channels.announcement_tx,
&channels.tick_tx,
+ &app.news_read_ids,
&app.news_read_urls,
+ &app.news_seen_pkg_versions,
+ &app.news_seen_aur_comments,
&app.official_index_path,
&channels.net_err_tx,
&channels.index_notify_tx,
@@ -85,6 +99,7 @@ pub async fn run(dry_run_flag: bool) -> Result<()> {
updates_refresh_interval,
app.installed_packages_mode,
crate::theme::settings().get_announcement,
+ app.last_startup_timestamp.as_deref(),
);
// Spawn event reading thread
diff --git a/src/app/runtime/tick_handler.rs b/src/app/runtime/tick_handler.rs
index df65bffcf..3fd29d766 100644
--- a/src/app/runtime/tick_handler.rs
+++ b/src/app/runtime/tick_handler.rs
@@ -5,15 +5,17 @@ use tokio::sync::mpsc;
use tokio::time::Duration;
use crate::logic::send_query;
-use crate::state::{AppState, ArchStatusColor, Modal, NewsItem, PackageItem, QueryInput};
+use crate::state::{AppState, ArchStatusColor, PackageItem, QueryInput};
use super::super::persist::{
maybe_flush_announcement_read, maybe_flush_cache, maybe_flush_deps_cache,
- maybe_flush_files_cache, maybe_flush_install, maybe_flush_news_read,
+ maybe_flush_files_cache, maybe_flush_install, maybe_flush_news_bookmarks,
+ maybe_flush_news_content_cache, maybe_flush_news_read, maybe_flush_news_read_ids,
+ maybe_flush_news_recent, maybe_flush_news_seen_aur_comments, maybe_flush_news_seen_versions,
maybe_flush_pkgbuild_parse_cache, maybe_flush_recent, maybe_flush_sandbox_cache,
maybe_flush_services_cache,
};
-use super::super::recent::maybe_save_recent;
+use super::super::recent::{maybe_save_news_recent, maybe_save_recent};
/// What: Handle PKGBUILD result event.
///
@@ -523,6 +525,9 @@ fn handle_installed_cache_polling(
/// - Polls installed/explicit caches if needed
/// - Handles ring prefetch, sort menu auto-close, and toast expiration
#[allow(clippy::too_many_arguments)]
+// Refactoring would require significant restructuring of the tick handling logic,
+// which would reduce the readability of these sequential periodic tasks.
+#[allow(clippy::too_many_lines)] // Handles periodic tasks (cache flushing, faillock checks, news content timeouts, preflight resolution, executor requests) that require sequential processing
pub fn handle_tick(
app: &mut AppState,
query_tx: &mpsc::UnboundedSender,
@@ -542,15 +547,23 @@ pub fn handle_tick(
updates_tx: &mpsc::UnboundedSender<(usize, Vec)>,
executor_req_tx: &mpsc::UnboundedSender,
post_summary_req_tx: &mpsc::UnboundedSender<(Vec, Option)>,
+ news_content_req_tx: &mpsc::UnboundedSender<String>,
) {
// Check faillock status periodically (every minute via worker, but also check here)
// We check every tick but only update if enough time has passed
static LAST_FAILLOCK_CHECK: std::sync::OnceLock> =
std::sync::OnceLock::new();
maybe_save_recent(app);
+ maybe_save_news_recent(app);
maybe_flush_cache(app);
maybe_flush_recent(app);
+ maybe_flush_news_recent(app);
+ maybe_flush_news_bookmarks(app);
+ maybe_flush_news_content_cache(app);
maybe_flush_news_read(app);
+ maybe_flush_news_read_ids(app);
+ maybe_flush_news_seen_versions(app);
+ maybe_flush_news_seen_aur_comments(app);
maybe_flush_announcement_read(app);
maybe_flush_install(app);
maybe_flush_deps_cache(app);
@@ -584,13 +597,49 @@ pub fn handle_tick(
app.faillock_remaining_minutes = remaining_minutes;
}
+ // Timeout guard for news content fetches to avoid stuck "Loading content..."
+ // Only check timeout if main news feed is not loading (to avoid showing timeout toast during initial load)
+ if app.news_content_loading && !app.news_loading {
+ if let Some(started) = app.news_content_loading_since {
+ if started.elapsed() > std::time::Duration::from_secs(10) {
+ let url = app
+ .news_results
+ .get(app.news_selected)
+ .and_then(|it| it.url.clone());
+ tracing::warn!(
+ selected = app.news_selected,
+ url = ?url,
+ elapsed_ms = started.elapsed().as_millis(),
+ "news_content: timed out waiting for response"
+ );
+ app.news_content_loading = false;
+ app.news_content_loading_since = None;
+ app.news_content = Some("Failed to load content: timed out after 10s".to_string());
+ app.toast_message = Some("News content timed out".to_string());
+ app.toast_expires_at = Some(Instant::now() + std::time::Duration::from_secs(3));
+ } else {
+ tracing::trace!(
+ selected = app.news_selected,
+ elapsed_ms = started.elapsed().as_millis(),
+ "news_content: still loading"
+ );
+ }
+ } else {
+ // Ensure we set a start time if missing for safety
+ app.news_content_loading_since = Some(Instant::now());
+ }
+ }
+
// Refresh updates list if flag is set (manual refresh via button click)
if app.refresh_updates {
app.refresh_updates = false;
app.updates_loading = true;
- crate::app::runtime::workers::auxiliary::spawn_updates_worker(updates_tx.clone());
+ crate::app::runtime::workers::updates::spawn_updates_worker(updates_tx.clone());
}
+ // Request news content if in news mode and content not cached
+ crate::events::utils::maybe_request_news_content(app, news_content_req_tx);
+
handle_preflight_resolution(
app,
deps_req_tx,
@@ -674,8 +723,11 @@ pub fn handle_tick(
app.sort_menu_auto_close_at = None;
}
+ // Clear expired toast, but don't clear news loading toast while news are still loading
if let Some(deadline) = app.toast_expires_at
&& std::time::Instant::now() >= deadline
+ && !app.news_loading
+ // Don't clear toast if news are still loading
{
app.toast_message = None;
app.toast_expires_at = None;
@@ -686,29 +738,43 @@ pub fn handle_tick(
///
/// Inputs:
/// - `app`: Application state
-/// - `todays`: List of news items
+/// - `items`: List of news feed items
///
/// Details:
/// - Shows toast if no new news
/// - Opens news modal if there are unread items
-pub fn handle_news(app: &mut AppState, todays: &[NewsItem]) {
- if todays.is_empty() {
- app.toast_message = Some(crate::i18n::t(app, "app.toasts.no_new_news"));
- app.toast_expires_at = Some(Instant::now() + Duration::from_secs(10));
+/// - Clears `news_loading` flag only when news modal is actually shown
+pub fn handle_news(app: &mut AppState, items: &[crate::state::types::NewsFeedItem]) {
+ tracing::info!(
+ items_count = items.len(),
+ current_modal = ?app.modal,
+ news_loading = app.news_loading,
+ "handle_news called"
+ );
+ // Don't clear news_loading or toast here - the main news feed pane may still be loading.
+ // The loading toast and flag will be cleared when handle_news_feed_items receives the aggregated feed.
+
+ if items.is_empty() {
+ // No news available - set ready flag to false
+ tracing::info!("no news items, marking as not ready");
+ app.news_ready = false;
} else {
- // Queue news to show after all announcements are dismissed
- // Only show immediately if no modal is currently displayed
- if matches!(app.modal, Modal::None) {
- app.modal = Modal::News {
- items: todays.to_vec(),
- selected: 0,
- };
- tracing::info!("showing news modal immediately (no other modals)");
- } else {
- // Queue news to show after announcements
- app.pending_news = Some(todays.to_vec());
- tracing::debug!("queued news (modal already open, will show after announcements)");
- }
+ // News are ready - set flag and store items for button click
+ tracing::info!("news items available, marking as ready");
+ app.news_ready = true;
+ // Store news items for later display when button is clicked
+ // Convert NewsFeedItem to NewsItem for pending_news (legacy format); items without a URL are skipped
+ let legacy_items: Vec = items
+ .iter()
+ .filter_map(|item| {
+ item.url.as_ref().map(|url| crate::state::NewsItem {
+ date: item.date.clone(),
+ title: item.title.clone(),
+ url: url.clone(),
+ })
+ })
+ .collect();
+ app.pending_news = Some(legacy_items);
}
}
@@ -771,6 +837,7 @@ mod tests {
let (updates_tx, _updates_rx) = mpsc::unbounded_channel();
let (executor_req_tx, _executor_req_rx) = mpsc::unbounded_channel();
let (post_summary_req_tx, _post_summary_req_rx) = mpsc::unbounded_channel();
+ let (news_content_req_tx, _news_content_req_rx) = mpsc::unbounded_channel();
// Should not panic
handle_tick(
@@ -786,6 +853,7 @@ mod tests {
&updates_tx,
&executor_req_tx,
&post_summary_req_tx,
+ &news_content_req_tx,
);
}
@@ -836,6 +904,7 @@ mod tests {
let (updates_tx, _updates_rx) = mpsc::unbounded_channel();
let (executor_req_tx, _executor_req_rx) = mpsc::unbounded_channel();
let (post_summary_req_tx, _post_summary_req_rx) = mpsc::unbounded_channel();
+ let (news_content_req_tx, _news_content_req_rx) = mpsc::unbounded_channel();
handle_tick(
&mut app,
@@ -850,6 +919,7 @@ mod tests {
&updates_tx,
&executor_req_tx,
&post_summary_req_tx,
+ &news_content_req_tx,
);
// Queues should be cleared
@@ -902,6 +972,7 @@ mod tests {
let (updates_tx, _updates_rx) = mpsc::unbounded_channel();
let (executor_req_tx, _executor_req_rx) = mpsc::unbounded_channel();
let (post_summary_req_tx, _post_summary_req_rx) = mpsc::unbounded_channel();
+ let (news_content_req_tx, _news_content_req_rx) = mpsc::unbounded_channel();
handle_tick(
&mut app,
@@ -916,6 +987,7 @@ mod tests {
&updates_tx,
&executor_req_tx,
&post_summary_req_tx,
+ &news_content_req_tx,
);
// Request should be sent
@@ -940,45 +1012,56 @@ mod tests {
/// - Tests that empty news list shows appropriate message
fn handle_news_shows_toast_when_empty() {
let mut app = new_app();
- let news: Vec = vec![];
+ let news: Vec = vec![];
handle_news(&mut app, &news);
- // Toast should be set
- assert!(app.toast_message.is_some());
- assert!(app.toast_expires_at.is_some());
+ // News should not be ready
+ assert!(!app.news_ready);
+ // Toast should not be set (handle_news no longer shows a toast for empty news)
+ assert!(app.toast_message.is_none());
+ assert!(app.toast_expires_at.is_none());
}
#[test]
- /// What: Verify that `handle_news` opens modal when news available.
+ /// What: Verify that `handle_news` sets `news_ready` and stores news for button click.
///
/// Inputs:
/// - `AppState`
/// - Non-empty news list
///
/// Output:
- /// - News modal is opened
- /// - First item is selected
+ /// - `news_ready` is true
+ /// - `pending_news` is set with news items
+ /// - Modal is NOT automatically opened (waiting for button click)
///
/// Details:
- /// - Tests that news modal is properly opened
+ /// - Tests that news are marked as ready and stored for later display
fn handle_news_opens_modal_when_available() {
let mut app = new_app();
- let news = vec![NewsItem {
- title: "Test News".to_string(),
- url: "https://example.com/news".to_string(),
+ let news = vec![crate::state::types::NewsFeedItem {
+ id: "https://example.com/news".to_string(),
date: String::new(),
+ title: "Test News".to_string(),
+ summary: None,
+ url: Some("https://example.com/news".to_string()),
+ source: crate::state::types::NewsFeedSource::ArchNews,
+ severity: None,
+ packages: Vec::new(),
}];
handle_news(&mut app, &news);
- // Modal should be opened
- if let crate::state::Modal::News { items, selected } = &app.modal {
- assert_eq!(items.len(), 1);
- assert_eq!(selected, &0);
- } else {
- panic!("Expected News modal");
+ // News should be ready
+ assert!(app.news_ready);
+ // Pending news should be set
+ assert!(app.pending_news.is_some());
+ if let Some(pending) = &app.pending_news {
+ assert_eq!(pending.len(), 1);
+ assert_eq!(pending[0].title, "Test News");
}
+ // Modal should NOT be automatically opened (waiting for button click)
+ assert!(matches!(app.modal, crate::state::Modal::None));
}
#[test]
diff --git a/src/app/runtime/workers/auxiliary.rs b/src/app/runtime/workers/auxiliary.rs
index ea7f8d472..80e99ce1d 100644
--- a/src/app/runtime/workers/auxiliary.rs
+++ b/src/app/runtime/workers/auxiliary.rs
@@ -1,730 +1,346 @@
use std::sync::Arc;
-use std::sync::OnceLock;
use std::sync::atomic::AtomicBool;
use crossterm::event::Event as CEvent;
-use tokio::{
- sync::mpsc,
- time::{Duration, sleep},
-};
+use tokio::sync::mpsc;
+use tokio::sync::oneshot;
+use tokio::time::{Duration, sleep};
use crate::index as pkgindex;
use crate::sources;
-use crate::state::{ArchStatusColor, NewsItem};
+use crate::state::ArchStatusColor;
-/// What: Spawn background workers for status, news, announcements, and tick events.
+use crate::app::runtime::workers::news;
+use crate::app::runtime::workers::updates;
+
+/// What: Spawns Arch status worker that fetches status once at startup and periodically.
///
/// Inputs:
-/// - `headless`: When `true`, skip terminal-dependent operations
/// - `status_tx`: Channel sender for Arch status updates
-/// - `news_tx`: Channel sender for Arch news updates
-/// - `announcement_tx`: Channel sender for remote announcement updates
-/// - `tick_tx`: Channel sender for tick events
-/// - `news_read_urls`: Set of already-read news URLs
-/// - `official_index_path`: Path to official package index
-/// - `net_err_tx`: Channel sender for network errors
-/// - `index_notify_tx`: Channel sender for index update notifications
-/// - `updates_tx`: Channel sender for package updates
-/// - `updates_refresh_interval`: Refresh interval in seconds for pacman -Qu and AUR helper checks
-/// - `installed_packages_mode`: Filter mode for installed packages (leaf only vs all explicit)
-/// - `get_announcement`: Whether to fetch remote announcements from GitHub Gist
+///
+/// Output:
+/// - None (spawns async task)
///
/// Details:
-/// - Fetches Arch status text once at startup and periodically every 120 seconds
-/// - Fetches Arch news once at startup, filtering out already-read items
-/// - Fetches remote announcement once at startup if URL is configured
-/// - Updates package index in background (Windows vs non-Windows handling)
-/// - Refreshes pacman caches (installed, explicit) using the configured installed packages mode
-/// - Spawns tick worker that sends events every 200ms
-/// - Checks for available package updates once at startup and periodically at configured interval
-#[allow(clippy::too_many_arguments)]
-pub fn spawn_auxiliary_workers(
- headless: bool,
- status_tx: &mpsc::UnboundedSender<(String, ArchStatusColor)>,
- news_tx: &mpsc::UnboundedSender>,
- announcement_tx: &mpsc::UnboundedSender,
- tick_tx: &mpsc::UnboundedSender<()>,
- news_read_urls: &std::collections::HashSet,
- official_index_path: &std::path::Path,
- net_err_tx: &mpsc::UnboundedSender,
- index_notify_tx: &mpsc::UnboundedSender<()>,
- updates_tx: &mpsc::UnboundedSender<(usize, Vec)>,
- updates_refresh_interval: u64,
- installed_packages_mode: crate::state::InstalledPackagesMode,
- get_announcement: bool,
-) {
- // Fetch Arch status text once at startup (skip in headless mode to avoid network delays)
- if !headless {
- let status_tx_once = status_tx.clone();
- tokio::spawn(async move {
- if let Ok((txt, color)) = sources::fetch_arch_status_text().await {
- let _ = status_tx_once.send((txt, color));
- }
- });
-
- // Periodically refresh Arch status every 120 seconds
- let status_tx_periodic = status_tx.clone();
- tokio::spawn(async move {
- loop {
- sleep(Duration::from_secs(120)).await;
- if let Ok((txt, color)) = sources::fetch_arch_status_text().await {
- let _ = status_tx_periodic.send((txt, color));
- }
- }
- });
- }
-
- // Fetch Arch news once at startup; show unread items (by URL) if any (skip in headless mode)
- if !headless {
- let news_tx_once = news_tx.clone();
- let read_set = news_read_urls.clone();
- tokio::spawn(async move {
- if let Ok(list) = sources::fetch_arch_news(10).await {
- let unread: Vec = list
- .into_iter()
- .filter(|it| !read_set.contains(&it.url))
- .collect();
- let _ = news_tx_once.send(unread);
- }
- });
- }
-
- // Fetch remote announcement once at startup if enabled (skip in headless mode)
- if !headless && get_announcement {
- let announcement_tx_once = announcement_tx.clone();
- // Hardcoded Gist URL for remote announcements
- let url = "https://gist.githubusercontent.com/Firstp1ck/d2e6016b8d7a90f813a582078208e9bd/raw/announcement.json".to_string();
- tokio::spawn(async move {
- tracing::info!(url = %url, "fetching remote announcement");
- match reqwest::get(&url).await {
- Ok(response) => {
- tracing::debug!(
- status = response.status().as_u16(),
- "announcement fetch response received"
- );
- match response
- .json::()
- .await
- {
- Ok(json) => {
- tracing::info!(id = %json.id, "announcement fetched successfully");
- let _ = announcement_tx_once.send(json);
- }
- Err(e) => {
- tracing::warn!(error = %e, "failed to parse announcement JSON");
- }
- }
- }
- Err(e) => {
- tracing::warn!(url = %url, error = %e, "failed to fetch announcement");
- }
- }
- });
- }
-
- #[cfg(windows)]
- {
- // Save mirrors into the repository directory in the source tree and build the index via Arch API
- let repo_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("repository");
- let index_path = official_index_path.to_path_buf();
- let net_err = net_err_tx.clone();
- let index_notify = index_notify_tx.clone();
- tokio::spawn(async move {
- crate::index::refresh_windows_mirrors_and_index(
- index_path,
- repo_dir,
- net_err,
- index_notify,
- )
- .await;
- });
- }
- #[cfg(not(windows))]
- {
- // Skip index update in headless mode to avoid slow network/disk operations
- if !headless {
- let index_path = official_index_path.to_path_buf();
- let net_err = net_err_tx.clone();
- let index_notify = index_notify_tx.clone();
- tokio::spawn(async move {
- pkgindex::update_in_background(index_path, net_err, index_notify).await;
- });
+/// - Fetches Arch status text once at startup
+/// - Periodically refreshes Arch status every 120 seconds
+fn spawn_status_worker(status_tx: &mpsc::UnboundedSender<(String, ArchStatusColor)>) {
+ // Fetch Arch status text once at startup
+ let status_tx_once = status_tx.clone();
+ tokio::spawn(async move {
+ if let Ok((txt, color)) = sources::fetch_arch_status_text().await {
+ let _ = status_tx_once.send((txt, color));
}
- }
-
- // Skip pacman cache refreshes in headless mode to avoid slow process spawning
- if !headless {
- let mode = installed_packages_mode;
- tokio::spawn(async move {
- pkgindex::refresh_installed_cache().await;
- // Use the configured mode from settings
- pkgindex::refresh_explicit_cache(mode).await;
- });
- }
-
- // Check for available package updates once at startup (skip in headless mode)
- if !headless {
- spawn_updates_worker(updates_tx.clone());
-
- // Periodically refresh updates list at configured interval
- let updates_tx_periodic = updates_tx.clone();
- tokio::spawn(async move {
- let mut interval = tokio::time::interval(Duration::from_secs(updates_refresh_interval));
- // Skip the first tick to avoid immediate refresh after startup
- interval.tick().await;
- loop {
- interval.tick().await;
- spawn_updates_worker(updates_tx_periodic.clone());
- }
- });
- }
+ });
- // Spawn tick worker
- let tick_tx_bg = tick_tx.clone();
+ // Periodically refresh Arch status every 120 seconds
+ let status_tx_periodic = status_tx.clone();
tokio::spawn(async move {
- let mut interval = tokio::time::interval(Duration::from_millis(200));
loop {
- interval.tick().await;
- let _ = tick_tx_bg.send(());
+ sleep(Duration::from_secs(120)).await;
+ if let Ok((txt, color)) = sources::fetch_arch_status_text().await {
+ let _ = status_tx_periodic.send((txt, color));
+ }
}
});
-
- // Spawn faillock check worker (runs every minute)
- if !headless {
- let faillock_tx = tick_tx.clone();
- tokio::spawn(async move {
- let mut interval = tokio::time::interval(Duration::from_secs(60));
- // Skip the first tick to avoid immediate check after startup
- interval.tick().await;
- loop {
- interval.tick().await;
- // Trigger a tick to update faillock status in the UI
- let _ = faillock_tx.send(());
- }
- });
- }
}
-/// What: Check which AUR helper is available (paru or yay).
+/// What: Spawns announcement worker that fetches remote announcement from GitHub Gist.
///
-/// Output:
-/// - Tuple of (`has_paru`, `has_yay`, `helper_name`)
-fn check_aur_helper() -> (bool, bool, &'static str) {
- use std::process::{Command, Stdio};
-
- let has_paru = Command::new("paru")
- .args(["--version"])
- .stdin(Stdio::null())
- .stdout(Stdio::null())
- .stderr(Stdio::null())
- .output()
- .is_ok();
-
- let has_yay = if has_paru {
- false
- } else {
- Command::new("yay")
- .args(["--version"])
- .stdin(Stdio::null())
- .stdout(Stdio::null())
- .stderr(Stdio::null())
- .output()
- .is_ok()
- };
-
- let helper = if has_paru { "paru" } else { "yay" };
- if has_paru || has_yay {
- tracing::debug!("Using {} to check for AUR updates", helper);
- }
-
- (has_paru, has_yay, helper)
-}
-
-/// What: Check if fakeroot is available on the system.
-///
-/// Output:
-/// - `true` if fakeroot is available, `false` otherwise
-///
-/// Details:
-/// - Fakeroot is required to sync a temporary pacman database without root
-#[cfg(not(target_os = "windows"))]
-fn has_fakeroot() -> bool {
- use std::process::{Command, Stdio};
-
- Command::new("fakeroot")
- .args(["--version"])
- .stdin(Stdio::null())
- .stdout(Stdio::null())
- .stderr(Stdio::null())
- .output()
- .is_ok()
-}
-
-/// What: Get the current user's UID by reading /proc/self/status.
+/// Inputs:
+/// - `announcement_tx`: Channel sender for remote announcement updates
///
/// Output:
-/// - `Some(u32)` with the UID if successful
-/// - `None` if unable to read the UID
+/// - None (spawns async task)
///
/// Details:
-/// - Reads /proc/self/status and parses the Uid line
-/// - Returns the real UID (first value on the Uid line)
-#[cfg(not(target_os = "windows"))]
-fn get_uid() -> Option {
- let status = std::fs::read_to_string("/proc/self/status").ok()?;
- for line in status.lines() {
- if line.starts_with("Uid:") {
- // Format: "Uid:\treal\teffective\tsaved\tfs"
- let parts: Vec<&str> = line.split_whitespace().collect();
- if parts.len() >= 2 {
- return parts[1].parse().ok();
+/// - Fetches remote announcement from hardcoded Gist URL
+/// - Sends announcement to channel if successfully fetched and parsed
+fn spawn_announcement_worker(
+ announcement_tx: &mpsc::UnboundedSender,
+) {
+ let announcement_tx_once = announcement_tx.clone();
+ // Hardcoded Gist URL for remote announcements
+ let url = "https://gist.githubusercontent.com/Firstp1ck/d2e6016b8d7a90f813a582078208e9bd/raw/announcement.json".to_string();
+ tokio::spawn(async move {
+ tracing::info!(url = %url, "fetching remote announcement");
+ match reqwest::get(&url).await {
+ Ok(response) => {
+ tracing::debug!(
+ status = response.status().as_u16(),
+ "announcement fetch response received"
+ );
+ match response
+ .json::()
+ .await
+ {
+ Ok(json) => {
+ tracing::info!(id = %json.id, "announcement fetched successfully");
+ let _ = announcement_tx_once.send(json);
+ }
+ Err(e) => {
+ tracing::warn!(error = %e, "failed to parse announcement JSON");
+ }
+ }
+ }
+ Err(e) => {
+ tracing::warn!(url = %url, error = %e, "failed to fetch announcement");
}
}
- }
- None
+ });
}
-/// What: Set up a temporary pacman database directory for safe update checks.
+/// What: Spawns index update worker for Windows platform.
+///
+/// Inputs:
+/// - `official_index_path`: Path to official package index
+/// - `net_err_tx`: Channel sender for network errors
+/// - `index_notify_tx`: Channel sender for index update notifications
///
/// Output:
-/// - `Some(PathBuf)` with the temp database path if setup succeeds
-/// - `None` if setup fails
+/// - None (spawns async task)
///
/// Details:
-/// - Creates `/tmp/pacsea-db-{UID}/` directory
-/// - Creates a symlink from `local` to `/var/lib/pacman/local`
-/// - The symlink allows pacman to know which packages are installed
-/// - Directory is kept for reuse across subsequent checks
-#[cfg(not(target_os = "windows"))]
-fn setup_temp_db() -> Option {
- // Get current user ID
- let uid = get_uid()?;
- let temp_db = std::path::PathBuf::from(format!("/tmp/pacsea-db-{uid}"));
-
- // Create directory if needed
- if let Err(e) = std::fs::create_dir_all(&temp_db) {
- tracing::warn!("Failed to create temp database directory: {}", e);
- return None;
- }
-
- // Create symlink to local database (skip if exists)
- let local_link = temp_db.join("local");
- if !local_link.exists()
- && let Err(e) = std::os::unix::fs::symlink("/var/lib/pacman/local", &local_link)
- {
- tracing::warn!("Failed to create symlink to local database: {}", e);
- return None;
- }
-
- Some(temp_db)
+/// - Windows-specific: saves mirrors and builds index via Arch API
+#[cfg(windows)]
+fn spawn_index_update_worker(
+ official_index_path: &std::path::Path,
+ net_err_tx: &mpsc::UnboundedSender,
+ index_notify_tx: &mpsc::UnboundedSender<()>,
+) {
+ let repo_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("repository");
+ let index_path = official_index_path.to_path_buf();
+ let net_err = net_err_tx.clone();
+ let index_notify = index_notify_tx.clone();
+ tokio::spawn(async move {
+ crate::index::refresh_windows_mirrors_and_index(
+ index_path,
+ repo_dir,
+ net_err,
+ index_notify,
+ )
+ .await;
+ });
}
-/// What: Sync the temporary pacman database with remote repositories.
+/// What: Spawns index update worker for non-Windows platforms.
///
/// Inputs:
-/// - `temp_db`: Path to the temporary database directory
+/// - `headless`: When `true`, skip index update
+/// - `official_index_path`: Path to official package index
+/// - `net_err_tx`: Channel sender for network errors
+/// - `index_notify_tx`: Channel sender for index update notifications
///
/// Output:
-/// - `true` if sync succeeds, `false` otherwise
+/// - None (spawns async task)
///
/// Details:
-/// - Uses fakeroot to run `pacman -Sy` without root privileges
-/// - Syncs only the temporary database, not the system database
-/// - Uses `--logfile /dev/null` to prevent log file creation
-#[cfg(not(target_os = "windows"))]
-fn sync_temp_db(temp_db: &std::path::Path) -> bool {
- use std::process::{Command, Stdio};
-
- let output = Command::new("fakeroot")
- .args(["--", "pacman", "-Sy", "--dbpath"])
- .arg(temp_db)
- .args(["--logfile", "/dev/null"])
- .stdin(Stdio::null())
- .stdout(Stdio::null())
- .stderr(Stdio::null())
- .output();
-
- matches!(output, Ok(o) if o.status.success())
+/// - Updates package index in background
+/// - Skips in headless mode to avoid slow network/disk operations
+#[cfg(not(windows))]
+fn spawn_index_update_worker(
+ headless: bool,
+ official_index_path: &std::path::Path,
+ net_err_tx: &mpsc::UnboundedSender,
+ index_notify_tx: &mpsc::UnboundedSender<()>,
+) {
+ if headless {
+ return;
+ }
+ let index_path = official_index_path.to_path_buf();
+ let net_err = net_err_tx.clone();
+ let index_notify = index_notify_tx.clone();
+ tokio::spawn(async move {
+ pkgindex::update_in_background(index_path, net_err, index_notify).await;
+ });
}
-/// What: Parse packages from pacman -Qu output.
+/// What: Spawns cache refresh worker that refreshes pacman caches.
///
/// Inputs:
-/// - `output`: Raw command output bytes
+/// - `installed_packages_mode`: Filter mode for installed packages
///
/// Output:
-/// - Vector of (`package_name`, `old_version`, `new_version`) tuples
+/// - None (spawns async task)
///
/// Details:
-/// - Parses `"package-name old_version -> new_version"` format
-fn parse_checkupdates(output: &[u8]) -> Vec<(String, String, String)> {
- String::from_utf8_lossy(output)
- .lines()
- .filter_map(|line| {
- let trimmed = line.trim();
- if trimmed.is_empty() {
- None
- } else {
- // Parse "package-name old_version -> new_version" format
- trimmed.find(" -> ").and_then(|arrow_pos| {
- let before_arrow = &trimmed[..arrow_pos];
- let after_arrow = &trimmed[arrow_pos + 4..];
- let parts: Vec<&str> = before_arrow.split_whitespace().collect();
- if parts.len() >= 2 {
- let name = parts[0].to_string();
- let old_version = parts[1..].join(" "); // In case version has spaces
- let new_version = after_arrow.trim().to_string();
- Some((name, old_version, new_version))
- } else {
- None
- }
- })
- }
- })
- .collect()
+/// - Refreshes installed and explicit package caches
+/// - Uses the configured installed packages mode
+fn spawn_cache_refresh_worker(installed_packages_mode: crate::state::InstalledPackagesMode) {
+ let mode = installed_packages_mode;
+ tokio::spawn(async move {
+ pkgindex::refresh_installed_cache().await;
+ pkgindex::refresh_explicit_cache(mode).await;
+ });
}
-/// What: Parse packages from -Qua output.
+/// What: Spawns tick worker that sends tick events every 200ms.
///
/// Inputs:
-/// - `output`: Raw command output bytes
+/// - `tick_tx`: Channel sender for tick events
///
/// Output:
-/// - Vector of (`package_name`, `old_version`, `new_version`) tuples
+/// - None (spawns async task)
///
/// Details:
-/// - Parses "package old -> new" format
-fn parse_qua(output: &[u8]) -> Vec<(String, String, String)> {
- String::from_utf8_lossy(output)
- .lines()
- .filter_map(|line| {
- let trimmed = line.trim();
- if trimmed.is_empty() {
- None
- } else {
- // Parse "package old -> new" format
- trimmed.find(" -> ").and_then(|arrow_pos| {
- let before_arrow = &trimmed[..arrow_pos];
- let after_arrow = &trimmed[arrow_pos + 4..];
- let parts: Vec<&str> = before_arrow.split_whitespace().collect();
- if parts.len() >= 2 {
- let name = parts[0].to_string();
- let old_version = parts[1..].join(" "); // In case version has spaces
- let new_version = after_arrow.trim().to_string();
- Some((name, old_version, new_version))
- } else {
- None
- }
- })
- }
- })
- .collect()
-}
-
-/// What: Process pacman -Qu output and add packages to collections.
-///
-/// Inputs:
-/// - `output`: Command output result
-/// - `packages_map`: Mutable `HashMap` to store formatted package strings
-/// - `packages_set`: Mutable `HashSet` to track unique package names
-fn process_checkupdates_output(
- output: Result,
- packages_map: &mut std::collections::HashMap,
- packages_set: &mut std::collections::HashSet,
-) {
- match output {
- Ok(output) => {
- let exit_code = output.status.code();
- if output.status.success() {
- let packages = parse_checkupdates(&output.stdout);
- let count = packages.len();
-
- // Parse pacman -Qu output which already contains old and new versions
- for (name, old_version, new_version) in packages {
- // Format: "name - old_version -> name - new_version"
- let formatted = format!("{name} - {old_version} -> {name} - {new_version}");
- packages_map.insert(name.clone(), formatted);
- packages_set.insert(name);
- }
-
- tracing::debug!(
- "pacman -Qu completed successfully (exit code: {:?}): found {} packages from official repos",
- exit_code,
- count
- );
- } else if output.status.code() == Some(1) {
- // Exit code 1 is normal (no updates)
- tracing::debug!(
- "pacman -Qu returned exit code 1 (no updates available in official repos)"
- );
- } else {
- // Other exit codes are errors
- tracing::warn!("pacman -Qu command failed with exit code: {:?}", exit_code);
- }
- }
- Err(e) => {
- tracing::warn!("Failed to execute pacman -Qu: {}", e);
- }
- }
-}
-
-/// What: Process -Qua output and add packages to collections.
-///
-/// Inputs:
-/// - `result`: Command output result
-/// - `helper`: Helper name for logging
-/// - `packages_map`: Mutable `HashMap` to store formatted package strings
-/// - `packages_set`: Mutable `HashSet` to track unique package names
-fn process_qua_output(
- result: Option>,
- helper: &str,
- packages_map: &mut std::collections::HashMap,
- packages_set: &mut std::collections::HashSet,
-) {
- if let Some(result) = result {
- match result {
- Ok(output) => {
- let exit_code = output.status.code();
- if output.status.success() {
- let packages = parse_qua(&output.stdout);
- let count = packages.len();
- let before_count = packages_set.len();
-
- for (name, old_version, new_version) in packages {
- // Format: "name - old_version -> name - new_version"
- let formatted = format!("{name} - {old_version} -> {name} - {new_version}");
- packages_map.insert(name.clone(), formatted);
- packages_set.insert(name);
- }
-
- let after_count = packages_set.len();
- tracing::debug!(
- "{} -Qua completed successfully (exit code: {:?}): found {} packages from AUR, {} total ({} new)",
- helper,
- exit_code,
- count,
- after_count,
- after_count - before_count
- );
- } else if output.status.code() == Some(1) {
- // Exit code 1 is normal (no updates)
- tracing::debug!(
- "{} -Qua returned exit code 1 (no updates available in AUR)",
- helper
- );
- } else {
- // Other exit codes are errors
- tracing::warn!(
- "{} -Qua command failed with exit code: {:?}",
- helper,
- exit_code
- );
- }
- }
- Err(e) => {
- tracing::warn!("Failed to execute {} -Qua: {}", helper, e);
- }
+/// - Sends tick events every 200ms to drive UI updates
+fn spawn_tick_worker(tick_tx: &mpsc::UnboundedSender<()>) {
+ let tick_tx_bg = tick_tx.clone();
+ tokio::spawn(async move {
+ let mut interval = tokio::time::interval(Duration::from_millis(200));
+ loop {
+ interval.tick().await;
+ let _ = tick_tx_bg.send(());
}
- } else {
- tracing::debug!("No AUR helper available, skipping AUR updates check");
- }
+ });
}
-/// Static mutex to prevent concurrent update checks.
-///
-/// What: Tracks whether an update check is currently in progress.
-///
-/// Details:
-/// - Uses `OnceLock` for lazy initialization
-/// - Uses `tokio::sync::Mutex` for async-safe synchronization
-/// - Prevents overlapping file writes to `available_updates.txt`
-static UPDATE_CHECK_IN_PROGRESS: OnceLock> = OnceLock::new();
-
-/// What: Spawn background worker to check for available package updates.
+/// What: Spawns faillock check worker that triggers tick events every minute.
///
/// Inputs:
-/// - `updates_tx`: Channel sender for updates (count, sorted list)
+/// - `tick_tx`: Channel sender for tick events
///
/// Output:
/// - None (spawns async task)
///
/// Details:
-/// - Uses a temporary database to safely check for updates without modifying the system
-/// - Syncs the temp database with `fakeroot pacman -Sy` if fakeroot is available
-/// - Falls back to `pacman -Qu` (stale local DB) if fakeroot is not available
-/// - Executes `yay -Qua` or `paru -Qua` for AUR updates
-/// - Removes duplicates using `HashSet`
-/// - Sorts package names alphabetically
-/// - Saves list to `~/.config/pacsea/lists/available_updates.txt`
-/// - Sends `(count, sorted_list)` via channel
-/// - Uses synchronization to prevent concurrent update checks and file writes
-pub fn spawn_updates_worker(updates_tx: mpsc::UnboundedSender<(usize, Vec<String>)>) {
- let updates_tx_once = updates_tx;
-
+/// - Triggers tick events every 60 seconds to update faillock status in UI
+fn spawn_faillock_worker(tick_tx: &mpsc::UnboundedSender<()>) {
+ let faillock_tx = tick_tx.clone();
tokio::spawn(async move {
- // Get mutex reference inside async block
- let mutex = UPDATE_CHECK_IN_PROGRESS.get_or_init(|| tokio::sync::Mutex::new(false));
-
- // Check if update check is already in progress
- let mut in_progress = mutex.lock().await;
- if *in_progress {
- tracing::debug!("Update check already in progress, skipping concurrent call");
- return;
+ let mut interval = tokio::time::interval(Duration::from_secs(60));
+ // Skip the first tick to avoid immediate check after startup
+ interval.tick().await;
+ loop {
+ interval.tick().await;
+ // Trigger a tick to update faillock status in the UI
+ let _ = faillock_tx.send(());
}
+ });
+}
- // Set flag to indicate update check is in progress
- *in_progress = true;
- drop(in_progress); // Release lock before blocking operation
-
- let result = tokio::task::spawn_blocking(move || {
- use std::collections::HashSet;
- use std::process::{Command, Stdio};
-
- tracing::debug!("Starting update check");
-
- let (has_paru, has_yay, helper) = check_aur_helper();
-
- // Try safe update check with temp database (non-Windows only)
- #[cfg(not(target_os = "windows"))]
-            let temp_db_path: Option<std::path::PathBuf> = if has_fakeroot() {
- tracing::debug!("fakeroot is available, setting up temp database");
- setup_temp_db().and_then(|temp_db| {
- tracing::debug!("Syncing temporary database at {:?}", temp_db);
- if sync_temp_db(&temp_db) {
- tracing::debug!("Temp database sync successful");
- Some(temp_db)
- } else {
- tracing::warn!("Temp database sync failed, falling back to pacman -Qu");
- None
- }
- })
- } else {
- tracing::debug!("fakeroot not available, falling back to pacman -Qu");
- None
- };
-
- // Execute pacman -Qu with appropriate --dbpath
- #[cfg(not(target_os = "windows"))]
- let output_checkupdates = temp_db_path.as_ref().map_or_else(
- || {
- tracing::debug!("Executing: pacman -Qu (using system database)");
- Command::new("pacman")
- .args(["-Qu"])
- .stdin(Stdio::null())
- .stdout(Stdio::piped())
- .stderr(Stdio::null())
- .output()
- },
- |db_path| {
- tracing::debug!(
- "Executing: pacman -Qu --dbpath {:?} (using synced temp database)",
- db_path
- );
- Command::new("pacman")
- .args(["-Qu", "--dbpath"])
- .arg(db_path)
- .stdin(Stdio::null())
- .stdout(Stdio::piped())
- .stderr(Stdio::null())
- .output()
- },
- );
-
- #[cfg(target_os = "windows")]
- let output_checkupdates = {
- tracing::debug!("Executing: pacman -Qu (Windows fallback)");
- Command::new("pacman")
- .args(["-Qu"])
- .stdin(Stdio::null())
- .stdout(Stdio::piped())
- .stderr(Stdio::null())
- .output()
- };
-
- // Execute -Qua command (AUR) - only if helper is available
- let output_qua = if has_paru {
- tracing::debug!("Executing: paru -Qua (AUR updates)");
- Some(
- Command::new("paru")
- .args(["-Qua"])
- .stdin(Stdio::null())
- .stdout(Stdio::piped())
- .stderr(Stdio::null())
- .output(),
- )
- } else if has_yay {
- tracing::debug!("Executing: yay -Qua (AUR updates)");
- Some(
- Command::new("yay")
- .args(["-Qua"])
- .stdin(Stdio::null())
- .stdout(Stdio::piped())
- .stderr(Stdio::null())
- .output(),
- )
- } else {
- tracing::debug!("No AUR helper available (paru/yay), skipping AUR updates check");
- None
- };
-
- // Collect packages from both commands
- // Use HashMap to store: package_name -> formatted_string
- // Use HashSet to track unique package names for deduplication
-            let mut packages_map: std::collections::HashMap<String, String> =
- std::collections::HashMap::new();
- let mut packages_set = HashSet::new();
-
- // Parse pacman -Qu output (official repos)
- process_checkupdates_output(output_checkupdates, &mut packages_map, &mut packages_set);
-
- // Parse -Qua output (AUR)
- process_qua_output(output_qua, helper, &mut packages_map, &mut packages_set);
+/// What: Spawn background workers for status, news, announcements, and tick events.
+///
+/// Inputs:
+/// - `headless`: When `true`, skip terminal-dependent operations
+/// - `status_tx`: Channel sender for Arch status updates
+/// - `news_tx`: Channel sender for Arch news updates
+/// - `news_feed_tx`: Channel sender for aggregated news feed (Arch news + advisories)
+/// - `news_incremental_tx`: Channel sender for incremental background news items
+/// - `announcement_tx`: Channel sender for remote announcement updates
+/// - `tick_tx`: Channel sender for tick events
+/// - `news_read_ids`: Set of already-read news IDs
+/// - `news_read_urls`: Set of already-read news URLs
+/// - `official_index_path`: Path to official package index
+/// - `net_err_tx`: Channel sender for network errors
+/// - `index_notify_tx`: Channel sender for index update notifications
+/// - `updates_tx`: Channel sender for package updates
+/// - `updates_refresh_interval`: Refresh interval in seconds for pacman -Qu and AUR helper checks
+/// - `installed_packages_mode`: Filter mode for installed packages (leaf only vs all explicit)
+/// - `get_announcement`: Whether to fetch remote announcements from GitHub Gist
+/// - `last_startup_timestamp`: Previous TUI startup time (`YYYYMMDD:HHMMSS`) for incremental updates
+///
+/// Details:
+/// - Fetches Arch status text once at startup and periodically every 120 seconds
+/// - Fetches Arch news once at startup, filtering out already-read items
+/// - Fetches remote announcement once at startup if URL is configured
+/// - Updates package index in background (Windows vs non-Windows handling)
+/// - Refreshes pacman caches (installed, explicit) using the configured installed packages mode
+/// - Spawns tick worker that sends events every 200ms
+/// - Checks for available package updates once at startup and periodically at configured interval
+#[allow(clippy::too_many_arguments)]
+pub fn spawn_auxiliary_workers(
+ headless: bool,
+ status_tx: &mpsc::UnboundedSender<(String, ArchStatusColor)>,
+    news_tx: &mpsc::UnboundedSender<Vec<crate::state::types::NewsFeedItem>>,
+    news_feed_tx: &mpsc::UnboundedSender<crate::state::types::NewsFeedPayload>,
+    news_incremental_tx: &mpsc::UnboundedSender<crate::state::types::NewsFeedItem>,
+    announcement_tx: &mpsc::UnboundedSender<String>,
+    tick_tx: &mpsc::UnboundedSender<()>,
+    news_read_ids: &std::collections::HashSet<String>,
+    news_read_urls: &std::collections::HashSet<String>,
+    news_seen_pkg_versions: &std::collections::HashMap<String, String>,
+    news_seen_aur_comments: &std::collections::HashMap<String, String>,
+    official_index_path: &std::path::Path,
+    net_err_tx: &mpsc::UnboundedSender<String>,
+    index_notify_tx: &mpsc::UnboundedSender<()>,
+    updates_tx: &mpsc::UnboundedSender<(usize, Vec<String>)>,
+ updates_refresh_interval: u64,
+ installed_packages_mode: crate::state::InstalledPackagesMode,
+ get_announcement: bool,
+ last_startup_timestamp: Option<&str>,
+) {
+ tracing::info!(
+ headless,
+ get_announcement,
+ updates_refresh_interval,
+ "auxiliary workers starting"
+ );
+
+ // Spawn status worker (skip in headless mode)
+ if !headless {
+ spawn_status_worker(status_tx);
+ }
- // Convert to Vec of formatted strings, sorted by package name
-            let mut package_names: Vec<String> = packages_set.into_iter().collect();
- package_names.sort_unstable();
+ // Handle news workers
+ if headless {
+ tracing::info!("headless mode: skipping news/advisory fetch and announcements");
+ // In headless mode, send empty array to news channel to ensure event loop doesn't hang
+ let news_tx_headless = news_tx.clone();
+ tokio::spawn(async move {
+ tracing::debug!("headless mode: sending empty news array to clear any pending waits");
+ let _ = news_tx_headless.send(Vec::new());
+ });
+ } else {
+ // Create a oneshot channel to coordinate startup and aggregated news fetches
+ // This prevents concurrent requests to archlinux.org which can cause rate limiting/blocking
+ let (completion_tx, completion_rx) = oneshot::channel();
+ news::spawn_startup_news_worker(
+ news_tx,
+ news_read_ids,
+ news_read_urls,
+ news_seen_pkg_versions,
+ news_seen_aur_comments,
+ last_startup_timestamp,
+ Some(completion_tx),
+ );
+ news::spawn_aggregated_news_feed_worker(
+ news_feed_tx,
+ news_incremental_tx,
+ news_seen_pkg_versions,
+ news_seen_aur_comments,
+ Some(completion_rx),
+ );
+ }
-            let packages: Vec<String> = package_names
- .iter()
- .filter_map(|name| packages_map.get(name).cloned())
- .collect();
+ // Spawn announcement worker (skip in headless mode)
+ if !headless && get_announcement {
+ spawn_announcement_worker(announcement_tx);
+ }
- let count = packages.len();
- tracing::debug!(
- "Update check completed: found {} total available updates (after deduplication)",
- count
- );
+ // Spawn index update worker (platform-specific)
+ #[cfg(windows)]
+ spawn_index_update_worker(official_index_path, net_err_tx, index_notify_tx);
+ #[cfg(not(windows))]
+ spawn_index_update_worker(headless, official_index_path, net_err_tx, index_notify_tx);
- // Save to file
- let lists_dir = crate::theme::lists_dir();
- let updates_file = lists_dir.join("available_updates.txt");
- if let Err(e) = std::fs::write(&updates_file, packages.join("\n")) {
- tracing::warn!("Failed to save updates list to file: {}", e);
- } else {
- tracing::debug!("Saved updates list to {:?}", updates_file);
- }
+ // Spawn cache refresh worker (skip in headless mode)
+ if !headless {
+ spawn_cache_refresh_worker(installed_packages_mode);
+ }
- // Return count and package names (for display) - not the formatted strings
- (count, package_names)
- })
- .await;
+ // Spawn periodic updates worker (skip in headless mode)
+ if !headless {
+ updates::spawn_periodic_updates_worker(updates_tx, updates_refresh_interval);
+ }
- // Reset flag when done (even on error)
- let mutex = UPDATE_CHECK_IN_PROGRESS.get_or_init(|| tokio::sync::Mutex::new(false));
- let mut in_progress = mutex.lock().await;
- *in_progress = false;
- drop(in_progress);
+ // Spawn tick worker (always runs)
+ spawn_tick_worker(tick_tx);
- match result {
- Ok((count, list)) => {
- let _ = updates_tx_once.send((count, list));
- }
- Err(e) => {
- tracing::error!("Updates worker task panicked: {:?}", e);
- let _ = updates_tx_once.send((0, Vec::new()));
- }
- }
- });
+ // Spawn faillock worker (skip in headless mode)
+ if !headless {
+ spawn_faillock_worker(tick_tx);
+ }
}
/// What: Spawn event reading thread for terminal input.
@@ -789,80 +405,3 @@ pub fn spawn_event_thread(
});
}
}
-
-#[cfg(test)]
-mod tests {
- use super::parse_checkupdates;
-
- /// What: Test that pacman -Qu parsing correctly extracts old and new versions.
- ///
- /// Inputs:
- /// - Sample pacman -Qu output with format `"package-name old_version -> new_version"`
- ///
- /// Output:
- /// - Verifies that `old_version` and `new_version` are correctly parsed and different
- ///
- /// Details:
- /// - Tests parsing of pacman -Qu output format
- #[test]
- fn test_parse_checkupdates_extracts_correct_versions() {
- let test_cases = vec![
- ("bat 0.26.0-1 -> 0.26.0-2", "bat", "0.26.0-1", "0.26.0-2"),
- (
- "comgr 2:6.4.4-2 -> 2:7.1.0-1",
- "comgr",
- "2:6.4.4-2",
- "2:7.1.0-1",
- ),
- (
- "composable-kernel 6.4.4-1 -> 7.1.0-1",
- "composable-kernel",
- "6.4.4-1",
- "7.1.0-1",
- ),
- ];
-
- for (input, expected_name, expected_old, expected_new) in test_cases {
- let output = input.as_bytes();
- let entries = parse_checkupdates(output);
-
- assert_eq!(entries.len(), 1, "Failed to parse: {input}");
- let (name, old_version, new_version) = &entries[0];
- assert_eq!(name, expected_name, "Wrong name for: {input}");
- assert_eq!(old_version, expected_old, "Wrong old_version for: {input}");
- assert_eq!(new_version, expected_new, "Wrong new_version for: {input}");
- }
- }
-
- /// What: Test that pacman -Qu parsing handles multiple packages.
- ///
- /// Inputs:
- /// - Multi-line pacman -Qu output
- ///
- /// Output:
- /// - Verifies that all packages are parsed correctly
- #[test]
- fn test_parse_checkupdates_multiple_packages() {
- let input = "bat 0.26.0-1 -> 0.26.0-2\ncomgr 2:6.4.4-2 -> 2:7.1.0-1\n";
- let output = input.as_bytes();
- let entries = parse_checkupdates(output);
-
- assert_eq!(entries.len(), 2);
- assert_eq!(
- entries[0],
- (
- "bat".to_string(),
- "0.26.0-1".to_string(),
- "0.26.0-2".to_string()
- )
- );
- assert_eq!(
- entries[1],
- (
- "comgr".to_string(),
- "2:6.4.4-2".to_string(),
- "2:7.1.0-1".to_string()
- )
- );
- }
-}
diff --git a/src/app/runtime/workers/mod.rs b/src/app/runtime/workers/mod.rs
index cac8c4c30..7137e612e 100644
--- a/src/app/runtime/workers/mod.rs
+++ b/src/app/runtime/workers/mod.rs
@@ -1,6 +1,22 @@
+/// Auxiliary background workers (status, news, tick, index updates).
pub mod auxiliary;
+/// AUR comments fetching worker.
pub mod comments;
+/// Package details fetching worker.
pub mod details;
+/// Package installation/removal executor worker.
pub mod executor;
+/// News feed filtering and worker functions.
+pub mod news;
+/// News article content fetching worker.
+pub mod news_content;
+/// Preflight analysis workers (dependencies, files, services, sandbox, summary).
pub mod preflight;
+/// Package search worker.
pub mod search;
+/// Package update checking, parsing, and worker functions.
+pub mod updates;
+/// Helper functions for update checking (system checks, temp DB).
+mod updates_helpers;
+/// Parsing functions for update command output.
+mod updates_parsing;
diff --git a/src/app/runtime/workers/news.rs b/src/app/runtime/workers/news.rs
new file mode 100644
index 000000000..695343838
--- /dev/null
+++ b/src/app/runtime/workers/news.rs
@@ -0,0 +1,448 @@
+use std::collections::HashSet;
+
+use rand::Rng;
+use tokio::{sync::mpsc, sync::oneshot, time::Duration};
+
+use crate::index as pkgindex;
+use crate::sources;
+use crate::state::types::{NewsFeedSource, NewsSortMode};
+
+/// What: Ensures installed packages set is populated, refreshing caches if needed.
+///
+/// Inputs:
+/// - `installed`: Initial set of installed package names
+///
+/// Output:
+/// - `HashSet` with installed package names (refreshed if needed)
+///
+/// Details:
+/// - If the initial set is empty, refreshes installed and explicit caches
+/// - Returns refreshed set if available, otherwise returns original set
+pub async fn ensure_installed_set(installed: HashSet<String>) -> HashSet<String> {
+ if installed.is_empty() {
+ crate::index::refresh_installed_cache().await;
+ crate::index::refresh_explicit_cache(crate::state::InstalledPackagesMode::AllExplicit)
+ .await;
+        let refreshed: HashSet<String> = pkgindex::explicit_names().into_iter().collect();
+ if !refreshed.is_empty() {
+ return refreshed;
+ }
+ }
+ installed
+}
+
+/// What: Filters news feed items by source type based on startup news preferences.
+///
+/// Inputs:
+/// - `feed`: Vector of news feed items to filter
+/// - `prefs`: Theme settings containing startup news preferences
+///
+/// Output:
+/// - Filtered vector of news feed items
+///
+/// Details:
+/// - Filters items based on whether each source type is enabled in preferences
+pub fn filter_news_by_source(
+    feed: Vec<crate::state::types::NewsFeedItem>,
+    prefs: &crate::theme::Settings,
+) -> Vec<crate::state::types::NewsFeedItem> {
+ feed.into_iter()
+ .filter(|item| match item.source {
+ crate::state::types::NewsFeedSource::ArchNews => prefs.startup_news_show_arch_news,
+ crate::state::types::NewsFeedSource::SecurityAdvisory => {
+ prefs.startup_news_show_advisories
+ }
+ crate::state::types::NewsFeedSource::InstalledPackageUpdate => {
+ prefs.startup_news_show_pkg_updates
+ }
+ crate::state::types::NewsFeedSource::AurPackageUpdate => {
+ prefs.startup_news_show_aur_updates
+ }
+ crate::state::types::NewsFeedSource::AurComment => prefs.startup_news_show_aur_comments,
+ })
+ .collect()
+}
+
+/// What: Filters news feed items by maximum age in days.
+///
+/// Inputs:
+/// - `feed`: Vector of news feed items to filter
+/// - `max_age_days`: Optional maximum age in days
+///
+/// Output:
+/// - Filtered vector of news feed items
+///
+/// Details:
+/// - If `max_age_days` is Some, filters out items older than the cutoff date
+/// - If `max_age_days` is None, returns all items unchanged
+pub fn filter_news_by_age(
+    feed: Vec<crate::state::types::NewsFeedItem>,
+    max_age_days: Option<u32>,
+) -> Vec<crate::state::types::NewsFeedItem> {
+ if let Some(max_days) = max_age_days {
+ let cutoff_date = chrono::Utc::now()
+ .checked_sub_signed(chrono::Duration::days(i64::from(max_days)))
+ .map(|dt| dt.format("%Y-%m-%d").to_string());
+ #[allow(clippy::unnecessary_map_or)]
+ feed.into_iter()
+ .filter(|item| {
+ cutoff_date
+ .as_ref()
+ .map_or(true, |cutoff| &item.date >= cutoff)
+ })
+ .collect()
+ } else {
+ feed
+ }
+}
+
+/// What: Filters out already-read news items by ID and URL.
+///
+/// Inputs:
+/// - `feed`: Vector of news feed items to filter
+/// - `read_ids`: Set of already-read news IDs
+/// - `read_urls`: Set of already-read news URLs
+///
+/// Output:
+/// - Filtered vector containing only unread items
+///
+/// Details:
+/// - Removes items whose ID is in the `read_ids` set or whose URL is in the `read_urls` set
+/// - Package updates and AUR comments are tracked by ID, while Arch news items are tracked by URL
+pub fn filter_unread_news(
+    feed: Vec<crate::state::types::NewsFeedItem>,
+    read_ids: &HashSet<String>,
+    read_urls: &HashSet<String>,
+) -> Vec<crate::state::types::NewsFeedItem> {
+ feed.into_iter()
+ .filter(|item| {
+ !read_ids.contains(&item.id)
+ && item.url.as_ref().is_none_or(|url| !read_urls.contains(url))
+ })
+ .collect()
+}
+
+/// What: Spawns startup news worker that fetches and filters news items for startup popup.
+///
+/// Inputs:
+/// - `news_tx`: Channel sender for startup news updates
+/// - `news_read_ids`: Set of already-read news IDs
+/// - `news_read_urls`: Set of already-read news URLs
+/// - `news_seen_pkg_versions`: Map of seen package versions
+/// - `news_seen_aur_comments`: Map of seen AUR comments
+/// - `last_startup_timestamp`: Previous TUI startup time for incremental updates
+/// - `completion_tx`: Optional oneshot sender to signal completion
+///
+/// Output:
+/// - None (spawns async task)
+///
+/// Details:
+/// - Fetches news items based on startup news preferences
+/// - Filters by source type, max age, and read status (by both ID and URL)
+/// - Sends filtered items to the news channel
+pub fn spawn_startup_news_worker(
+    news_tx: &mpsc::UnboundedSender<Vec<crate::state::types::NewsFeedItem>>,
+    news_read_ids: &HashSet<String>,
+    news_read_urls: &HashSet<String>,
+    news_seen_pkg_versions: &std::collections::HashMap<String, String>,
+    news_seen_aur_comments: &std::collections::HashMap<String, String>,
+    last_startup_timestamp: Option<&str>,
+    completion_tx: Option<oneshot::Sender<()>>,
+) {
+ let prefs = crate::theme::settings();
+ if !prefs.startup_news_configured {
+ // If startup news is not configured, signal completion immediately
+ if let Some(tx) = completion_tx {
+ let _ = tx.send(());
+ }
+ return;
+ }
+
+ let news_tx_once = news_tx.clone();
+ let read_ids = news_read_ids.clone();
+ let read_urls = news_read_urls.clone();
+    let installed: HashSet<String> = pkgindex::explicit_names().into_iter().collect();
+ let mut seen_versions = news_seen_pkg_versions.clone();
+ let mut seen_aur_comments = news_seen_aur_comments.clone();
+ let last_startup = last_startup_timestamp.map(str::to_owned);
+ tracing::info!(
+ read_ids = read_ids.len(),
+ read_urls = read_urls.len(),
+ last_startup = ?last_startup,
+ "queueing startup news fetch (startup)"
+ );
+ tokio::spawn(async move {
+ // Use random jitter (0-500ms) before startup news fetch
+ // Keep this short since the startup popup should appear quickly
+ let jitter_ms = rand::rng().random_range(0..=500_u64);
+ if jitter_ms > 0 {
+ tracing::info!(jitter_ms, "staggering startup news fetch");
+ tokio::time::sleep(Duration::from_millis(jitter_ms)).await;
+ }
+ tracing::info!("startup news fetch task started");
+ let optimized_max_age = sources::optimize_max_age_for_startup(
+ last_startup.as_deref(),
+ prefs.startup_news_max_age_days,
+ );
+ let installed_set = ensure_installed_set(installed).await;
+ let include_pkg_updates =
+ prefs.startup_news_show_pkg_updates || prefs.startup_news_show_aur_updates;
+ #[allow(clippy::items_after_statements)]
+ const STARTUP_NEWS_LIMIT: usize = 20;
+ let updates_limit =
+ if prefs.startup_news_show_pkg_updates && prefs.startup_news_show_aur_updates {
+ STARTUP_NEWS_LIMIT * 2
+ } else {
+ STARTUP_NEWS_LIMIT
+ };
+ let ctx = sources::NewsFeedContext {
+ force_emit_all: true,
+ updates_list_path: Some(crate::theme::lists_dir().join("available_updates.txt")),
+ limit: updates_limit,
+ include_arch_news: prefs.startup_news_show_arch_news,
+ include_advisories: prefs.startup_news_show_advisories,
+ include_pkg_updates,
+ include_aur_comments: prefs.startup_news_show_aur_comments,
+ installed_filter: Some(&installed_set),
+ installed_only: false,
+ sort_mode: NewsSortMode::DateDesc,
+ seen_pkg_versions: &mut seen_versions,
+ seen_aur_comments: &mut seen_aur_comments,
+ max_age_days: optimized_max_age,
+ };
+ tracing::info!(
+ limit = updates_limit,
+ include_arch_news = prefs.startup_news_show_arch_news,
+ include_advisories = prefs.startup_news_show_advisories,
+ include_pkg_updates,
+ include_aur_comments = prefs.startup_news_show_aur_comments,
+ configured_max_age = ?prefs.startup_news_max_age_days,
+ optimized_max_age = ?optimized_max_age,
+ installed_count = installed_set.len(),
+ "starting startup news fetch"
+ );
+ match sources::fetch_news_feed(ctx).await {
+ Ok(feed) => {
+ tracing::info!(
+ total_items = feed.len(),
+ "startup news fetch completed successfully"
+ );
+ let source_filtered = filter_news_by_source(feed, &prefs);
+ let filtered = filter_news_by_age(source_filtered, prefs.startup_news_max_age_days);
+ let unread = filter_unread_news(filtered, &read_ids, &read_urls);
+ tracing::info!(
+ unread_count = unread.len(),
+ "sending startup news items to channel"
+ );
+ match news_tx_once.send(unread) {
+ Ok(()) => {
+ tracing::info!("startup news items sent to channel successfully");
+ }
+ Err(e) => {
+ tracing::error!(
+ error = %e,
+ "failed to send startup news items to channel (receiver dropped?)"
+ );
+ }
+ }
+ }
+ Err(e) => {
+ tracing::warn!(error = %e, "startup news fetch failed");
+ tracing::info!("sending empty array to clear loading flag after fetch error");
+ let _ = news_tx_once.send(Vec::new());
+ }
+ }
+ // Signal completion to allow aggregated feed fetch to proceed
+ if let Some(tx) = completion_tx {
+ let _ = tx.send(());
+ }
+ });
+}
+
+/// What: Spawns aggregated news feed worker that fetches combined news feed.
+///
+/// Inputs:
+/// - `news_feed_tx`: Channel sender for aggregated news feed
+/// - `news_incremental_tx`: Channel sender for incremental background news items
+/// - `news_seen_pkg_versions`: Map of seen package versions
+/// - `news_seen_aur_comments`: Map of seen AUR comments
+/// - `completion_rx`: Optional oneshot receiver to wait for startup news fetch completion
+///
+/// Output:
+/// - None (spawns async task)
+///
+/// Details:
+/// - Fetches aggregated news feed (Arch news + security advisories + package updates + AUR comments)
+/// - Sends feed payload to the news feed channel
+/// - Spawns background continuation task to fetch remaining items after initial limit
+/// - Waits for startup news fetch to complete before starting to prevent concurrent archlinux.org requests
+pub fn spawn_aggregated_news_feed_worker(
+    news_feed_tx: &mpsc::UnboundedSender<crate::state::types::NewsFeedPayload>,
+    news_incremental_tx: &mpsc::UnboundedSender<crate::state::types::NewsFeedItem>,
+    news_seen_pkg_versions: &std::collections::HashMap<String, String>,
+    news_seen_aur_comments: &std::collections::HashMap<String, String>,
+    completion_rx: Option<oneshot::Receiver<()>>,
+) {
+ let news_feed_tx_once = news_feed_tx.clone();
+ let news_incremental_tx_clone = news_incremental_tx.clone();
+    let installed: HashSet<String> = pkgindex::explicit_names().into_iter().collect();
+ let mut seen_versions = news_seen_pkg_versions.clone();
+ let mut seen_aur_comments = news_seen_aur_comments.clone();
+ tracing::info!(
+ installed_names = installed.len(),
+ "queueing combined news feed fetch (startup)"
+ );
+ tokio::spawn(async move {
+ // Wait for startup news fetch to complete before starting aggregated feed fetch
+ // This prevents concurrent requests to archlinux.org which can cause rate limiting/blocking
+ if let Some(rx) = completion_rx {
+ tracing::info!(
+ "waiting for startup news fetch to complete before starting aggregated feed fetch"
+ );
+ let _ = rx.await; // Wait for startup fetch completion signal
+ // Add a small additional delay after startup fetch completes to ensure clean separation
+ let additional_delay_ms = rand::rng().random_range(500..=1500_u64);
+ tracing::info!(
+ additional_delay_ms,
+ "additional delay after startup fetch completion"
+ );
+ tokio::time::sleep(Duration::from_millis(additional_delay_ms)).await;
+ } else {
+ // Fallback: use fixed delay if no completion signal is provided
+ // This should not happen in normal operation, but provides safety
+ let base_delay_ms = 10000_u64; // Increased to 10 seconds as fallback
+ let jitter_ms = rand::rng().random_range(0..=2000_u64);
+ let stagger_ms = base_delay_ms + jitter_ms;
+ tracing::warn!(
+ stagger_ms,
+ "no completion signal available, using fallback delay for aggregated feed fetch"
+ );
+ tokio::time::sleep(Duration::from_millis(stagger_ms)).await;
+ }
+ let installed_set = ensure_installed_set(installed).await;
+ let ctx = sources::NewsFeedContext {
+ force_emit_all: true,
+ updates_list_path: Some(crate::theme::lists_dir().join("available_updates.txt")),
+ limit: 50,
+ include_arch_news: true,
+ include_advisories: true,
+ include_pkg_updates: true,
+ include_aur_comments: true,
+ installed_filter: Some(&installed_set),
+ installed_only: false,
+ sort_mode: NewsSortMode::DateDesc,
+ seen_pkg_versions: &mut seen_versions,
+ seen_aur_comments: &mut seen_aur_comments,
+ max_age_days: None, // Main feed doesn't use date filtering
+ };
+ match sources::fetch_news_feed(ctx).await {
+ Ok(feed) => {
+ let arch_ct = feed
+ .iter()
+ .filter(|i| matches!(i.source, NewsFeedSource::ArchNews))
+ .count();
+ let adv_ct = feed
+ .iter()
+ .filter(|i| matches!(i.source, NewsFeedSource::SecurityAdvisory))
+ .count();
+ tracing::info!(
+ total = feed.len(),
+ arch = arch_ct,
+ advisories = adv_ct,
+ installed_names = installed_set.len(),
+ "news feed fetched"
+ );
+ if feed.is_empty() {
+ tracing::warn!(
+ installed_names = installed_set.len(),
+ "news feed is empty after fetch"
+ );
+ }
+ let payload = crate::state::types::NewsFeedPayload {
+ items: feed.clone(),
+ seen_pkg_versions: seen_versions,
+ seen_aur_comments,
+ };
+ tracing::info!(
+ items_count = feed.len(),
+ "sending aggregated news feed payload to channel"
+ );
+ if let Err(e) = news_feed_tx_once.send(payload) {
+ tracing::warn!(error = ?e, "failed to send news feed to channel");
+ } else {
+ tracing::info!("aggregated news feed payload sent successfully");
+ // Spawn background continuation task to fetch remaining items
+                    let initial_ids: HashSet<String> = feed.iter().map(|i| i.id.clone()).collect();
+ spawn_news_continuation_worker(
+ news_incremental_tx_clone.clone(),
+ installed_set.clone(),
+ initial_ids,
+ );
+ }
+ }
+ Err(e) => {
+ tracing::warn!(error = %e, "failed to fetch news feed");
+ }
+ }
+ });
+}
+
+/// What: Spawns background worker to continue fetching news items after initial limit.
+///
+/// Inputs:
+/// - `news_incremental_tx`: Channel sender for incremental news items
+/// - `installed_set`: Set of installed package names
+/// - `initial_ids`: Set of item IDs already sent in initial batch
+///
+/// Output:
+/// - None (spawns async task)
+///
+/// Details:
+/// - Fetches remaining items from all news sources (no limit)
+/// - Sends one item per second to the channel
+/// - Skips items already in `initial_ids`
+fn spawn_news_continuation_worker(
+    news_incremental_tx: mpsc::UnboundedSender<crate::state::types::NewsFeedItem>,
+    installed_set: HashSet<String>,
+    initial_ids: HashSet<String>,
+) {
+ tokio::spawn(async move {
+ tracing::info!(
+ initial_count = initial_ids.len(),
+ "starting news continuation worker"
+ );
+
+ // Wait a bit before starting continuation to let UI settle
+ tokio::time::sleep(Duration::from_secs(2)).await;
+
+ // Fetch continuation items from sources (high limit to get everything)
+ let continuation_items =
+ sources::fetch_continuation_items(&installed_set, &initial_ids).await;
+
+ match continuation_items {
+ Ok(items) => {
+ tracing::info!(
+ count = items.len(),
+ "continuation worker received items to send"
+ );
+ for item in items {
+ // Skip if already sent in initial batch
+ if initial_ids.contains(&item.id) {
+ continue;
+ }
+ // Send item to channel
+ if let Err(e) = news_incremental_tx.send(item.clone()) {
+ tracing::warn!(error = ?e, "failed to send incremental news item");
+ break;
+ }
+ // Throttle: 1 item per second
+ tokio::time::sleep(Duration::from_secs(1)).await;
+ }
+ tracing::info!("news continuation worker completed");
+ }
+ Err(e) => {
+ tracing::warn!(error = %e, "news continuation fetch failed");
+ }
+ }
+ });
+}
diff --git a/src/app/runtime/workers/news_content.rs b/src/app/runtime/workers/news_content.rs
new file mode 100644
index 000000000..bbc29f939
--- /dev/null
+++ b/src/app/runtime/workers/news_content.rs
@@ -0,0 +1,93 @@
+//! Background worker for fetching news article content.
+
+use std::time::Instant;
+use tokio::sync::mpsc;
+
+use crate::sources;
+
+/// What: Spawn background worker for news article content fetching.
+///
+/// Inputs:
+/// - `news_content_req_rx`: Channel receiver for content requests (URL as String)
+/// - `news_content_res_tx`: Channel sender for content responses (URL, content)
+///
+/// Output:
+/// - None (spawns async task)
+///
+/// Details:
+/// - Listens for URL requests on the channel
+/// - Drains stale requests and only processes the most recent one
+/// - This prevents queue buildup when users scroll quickly through items
+/// - Fetches article content asynchronously using `fetch_news_content`
+/// - Sends results as `(String, String)` with URL and content
+/// - On error, sends error message as content string
+pub fn spawn_news_content_worker(
+    mut news_content_req_rx: mpsc::UnboundedReceiver<String>,
+ news_content_res_tx: mpsc::UnboundedSender<(String, String)>,
+) {
+ tokio::spawn(async move {
+ while let Some(mut url) = news_content_req_rx.recv().await {
+ // Drain any pending requests and use the most recent one
+ // This prevents queue buildup when users scroll quickly or when
+ // slow requests (e.g., unreachable hosts) block the queue
+ let mut skipped = 0usize;
+ while let Ok(newer_url) = news_content_req_rx.try_recv() {
+ skipped += 1;
+ url = newer_url;
+ }
+ if skipped > 0 {
+ tracing::debug!(
+ skipped,
+ url = %url,
+ "news_content_worker: drained stale requests, processing most recent"
+ );
+ }
+
+ let url_clone = url.clone();
+ let started = Instant::now();
+ tracing::info!(url = %url_clone, "news_content_worker: fetch start");
+ match sources::fetch_news_content(&url).await {
+ Ok(content) => {
+ tracing::debug!(
+ url = %url_clone,
+ elapsed_ms = started.elapsed().as_millis(),
+ len = content.len(),
+ "news_content_worker: fetch success"
+ );
+ let _ = news_content_res_tx.send((url_clone, content));
+ }
+ Err(e) => {
+ tracing::warn!(
+ error = %e,
+ url = %url_clone,
+ elapsed_ms = started.elapsed().as_millis(),
+ "news_content_worker: fetch failed"
+ );
+ let _ = news_content_res_tx
+ .send((url_clone, format!("Failed to load content: {e}")));
+ }
+ }
+ }
+ });
+}
+
+#[cfg(test)]
+mod tests {
+ #[test]
+ /// What: Test error message format for failed content fetches.
+ ///
+ /// Inputs:
+ /// - Error string from `fetch_news_content`.
+ ///
+ /// Output:
+ /// - Error message formatted as "Failed to load content: {error}".
+ ///
+ /// Details:
+ /// - Verifies error message format matches worker behavior.
+ fn test_news_content_worker_error_format() {
+ let error = "Network error";
+ let error_msg = format!("Failed to load content: {error}");
+ assert!(error_msg.contains("Failed to load content"));
+ assert!(error_msg.contains(error));
+ }
+}
diff --git a/src/app/runtime/workers/updates.rs b/src/app/runtime/workers/updates.rs
new file mode 100644
index 000000000..7850fb8c9
--- /dev/null
+++ b/src/app/runtime/workers/updates.rs
@@ -0,0 +1,426 @@
+use std::sync::OnceLock;
+use tokio::sync::mpsc;
+
+use crate::app::runtime::workers::updates_helpers::{
+ check_aur_helper, has_checkupdates, has_fakeroot, setup_temp_db, sync_temp_db,
+};
+use crate::app::runtime::workers::updates_parsing::{
+ get_installed_version, parse_checkupdates, parse_checkupdates_tool, parse_qua,
+};
+
+/// What: Process pacman -Qu or checkupdates output and add packages to collections.
+///
+/// Inputs:
+/// - `output`: Command output result (stdout/stderr/status of the spawned process)
+/// - `is_checkupdates_tool`: `true` if output is from checkupdates tool, `false` if from pacman -Qu
+/// - `packages_map`: Mutable `HashMap` mapping package name to its formatted update string
+/// - `packages_set`: Mutable `HashSet` to track unique package names
+fn process_checkupdates_output(
+    output: Result<std::process::Output, std::io::Error>,
+    is_checkupdates_tool: bool,
+    packages_map: &mut std::collections::HashMap<String, String>,
+    packages_set: &mut std::collections::HashSet<String>,
+) {
+    match output {
+        Ok(output) => {
+            let exit_code = output.status.code();
+            if output.status.success() {
+                if is_checkupdates_tool {
+                    // Parse checkupdates output (package-name version format)
+                    let packages = parse_checkupdates_tool(&output.stdout);
+                    let count = packages.len();
+
+                    for (name, new_version) in packages {
+                        // Get old version from installed packages
+                        let old_version =
+                            get_installed_version(&name).unwrap_or_else(|| "unknown".to_string());
+                        // Format: "name - old_version -> name - new_version"
+                        let formatted = format!("{name} - {old_version} -> {name} - {new_version}");
+                        packages_map.insert(name.clone(), formatted);
+                        packages_set.insert(name);
+                    }
+
+                    tracing::debug!(
+                        "checkupdates completed successfully (exit code: {:?}): found {} packages from official repos",
+                        exit_code,
+                        count
+                    );
+                } else {
+                    // Parse pacman -Qu output (package-name old_version -> new_version format)
+                    let packages = parse_checkupdates(&output.stdout);
+                    let count = packages.len();
+
+                    for (name, old_version, new_version) in packages {
+                        // Format: "name - old_version -> name - new_version"
+                        let formatted = format!("{name} - {old_version} -> {name} - {new_version}");
+                        packages_map.insert(name.clone(), formatted);
+                        packages_set.insert(name);
+                    }
+
+                    tracing::debug!(
+                        "pacman -Qu completed successfully (exit code: {:?}): found {} packages from official repos",
+                        exit_code,
+                        count
+                    );
+                }
+            } else if exit_code == Some(1) {
+                // Exit code 1 is normal (no updates)
+                if is_checkupdates_tool {
+                    tracing::debug!(
+                        "checkupdates returned exit code 1 (no updates available in official repos)"
+                    );
+                } else {
+                    tracing::debug!(
+                        "pacman -Qu returned exit code 1 (no updates available in official repos)"
+                    );
+                }
+            } else {
+                // Other exit codes are errors
+                let stderr = String::from_utf8_lossy(&output.stderr);
+                if is_checkupdates_tool {
+                    tracing::warn!(
+                        "checkupdates command failed with exit code: {:?}, stderr: {}",
+                        exit_code,
+                        stderr.trim()
+                    );
+                } else {
+                    tracing::warn!("pacman -Qu command failed with exit code: {:?}", exit_code);
+                }
+            }
+        }
+        Err(e) => {
+            if is_checkupdates_tool {
+                tracing::warn!("Failed to execute checkupdates: {}", e);
+            } else {
+                tracing::warn!("Failed to execute pacman -Qu: {}", e);
+            }
+        }
+    }
+}
+
+/// What: Process -Qua output and add packages to collections.
+///
+/// Inputs:
+/// - `result`: Command output result
+/// - `helper`: Helper name for logging
+/// - `packages_map`: Mutable `HashMap` to store formatted package strings
+/// - `packages_set`: Mutable `HashSet` to track unique package names
+fn process_qua_output(
+ result: Option