mirror of
https://code.blicky.net/yorhel/ncdu.git
synced 2026-01-13 09:18:40 -09:00
Compare commits
584 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1b3d0a670e | ||
|
|
f452244576 | ||
|
|
14bb8d0dd1 | ||
|
|
19cfdcf543 | ||
|
|
5129de737e | ||
|
|
68671a1af1 | ||
|
|
74c91768a0 | ||
|
|
ac4d689e22 | ||
|
|
66b875eb00 | ||
|
|
67f34090fb | ||
|
|
5b96a48f53 | ||
|
|
58e6458130 | ||
|
|
653c3bfe70 | ||
|
|
beac59fb12 | ||
|
|
d97a7f73dd | ||
|
|
35a9faadb2 | ||
|
|
e43d22ba3f | ||
|
|
f4e4694612 | ||
|
|
c9f3d39d3e | ||
|
|
2b4c1ca03e | ||
|
|
af7163acf6 | ||
|
|
5438312440 | ||
|
|
0918096301 | ||
|
|
ee1d80da6a | ||
|
|
93a81a3898 | ||
|
|
cf3a8f3043 | ||
|
|
f7fe61194b | ||
|
|
456cde16df | ||
|
|
3c77dc458a | ||
|
|
ce9921846c | ||
|
|
e0ab5d40c7 | ||
|
|
607b07a30e | ||
|
|
b4dc9f1d4d | ||
|
|
2e5c767d4c | ||
|
|
5d5182ede3 | ||
|
|
db96bc698c | ||
|
|
4873a7c765 | ||
|
|
49d43f89a1 | ||
|
|
e5a6a1c5ea | ||
|
|
5593fa2233 | ||
|
|
9d51df02c1 | ||
|
|
7ed209a8e5 | ||
|
|
4bd6e3daba | ||
|
|
2fcd7f370c | ||
|
|
232a4f8741 | ||
|
|
bdc730f1e5 | ||
|
|
df5845baad | ||
|
|
0e6967498f | ||
|
|
bd442673d2 | ||
|
|
28d9eaecab | ||
|
|
61d7fc8473 | ||
|
|
e142d012f0 | ||
|
|
39517c01a8 | ||
|
|
cc26ead5f8 | ||
|
|
ca46c7241f | ||
|
|
e324804cdd | ||
|
|
26229d7a63 | ||
|
|
4ef9c3e817 | ||
|
|
c30699f93b | ||
|
|
6b7983b2f5 | ||
|
|
9418079da3 | ||
|
|
18f322c532 | ||
|
|
252f7fc253 | ||
|
|
49ef7cc34e | ||
|
|
17e384b485 | ||
|
|
ad166de925 | ||
|
|
22dca22450 | ||
|
|
30d6ddf149 | ||
|
|
8fb2290d5e | ||
|
|
90b43755b8 | ||
|
|
8ad61e87c1 | ||
|
|
85e12beb1c | ||
|
|
025e5ee99e | ||
|
|
cd00ae50d1 | ||
|
|
5a0c8c6175 | ||
|
|
ebaa9b6a89 | ||
|
|
f25bc5cbf4 | ||
|
|
87d336baeb | ||
|
|
0a6bcee32b | ||
|
|
3c055810d0 | ||
|
|
f6bffa40c7 | ||
|
|
08d373881c | ||
|
|
dc42c91619 | ||
|
|
2b2b4473e5 | ||
|
|
9cbe1bc91f | ||
|
|
f28f69d831 | ||
|
|
a5e57ee5ad | ||
|
|
b0d4fbe94f | ||
|
|
99f92934c6 | ||
|
|
9b517f27b1 | ||
|
|
705bd8907d | ||
|
|
e5508ba9b4 | ||
|
|
6bb31a4653 | ||
|
|
7558fd7f8e | ||
|
|
1e56c8604e | ||
|
|
d2e8dd8a90 | ||
|
|
ddbed8b07f | ||
|
|
db51987446 | ||
|
|
cc12c90dbc | ||
|
|
f2541d42ba | ||
|
|
c41467f240 | ||
|
|
2f97601736 | ||
|
|
574a4348a3 | ||
|
|
0215f3569d | ||
|
|
f4f4af4ee5 | ||
|
|
6db150cc98 | ||
|
|
a4484f27f3 | ||
|
|
d0d064aaf9 | ||
|
|
0e54ca775c | ||
|
|
d60bcb2113 | ||
|
|
e1818430b7 | ||
|
|
29bbab64b3 | ||
|
|
5944b738d0 | ||
|
|
946d2a0316 | ||
|
|
8ce5bae872 | ||
|
|
c41e3f5828 | ||
|
|
1fa40ae498 | ||
|
|
f03eee5443 | ||
|
|
491988d9a5 | ||
|
|
a2eb84e7d3 | ||
|
|
c83159f076 | ||
|
|
115de253a8 | ||
|
|
a71bc6eca5 | ||
|
|
ec99218645 | ||
|
|
83d3630ca7 | ||
|
|
ab6dc5be75 | ||
|
|
0d99781c67 | ||
|
|
e6cfacfa06 | ||
|
|
74be277249 | ||
|
|
46b88bcb5c | ||
|
|
ca1f293310 | ||
|
|
07a13d9c73 | ||
|
|
54d50e0443 | ||
|
|
ec233ff33a | ||
|
|
c002d9fa92 | ||
|
|
cebaaf0972 | ||
|
|
4d124c7c3d | ||
|
|
890e5a4af7 | ||
|
|
91281ef11f | ||
|
|
1452b91032 | ||
|
|
f7e774ee6e | ||
|
|
f37362af36 | ||
|
|
0d16b9f33e | ||
|
|
34dafffc62 | ||
|
|
1548f9276f | ||
|
|
d6728bca95 | ||
|
|
d523a77fdc | ||
|
|
f0764ea24e | ||
|
|
058b26bf9a | ||
|
|
e6806059e6 | ||
|
|
bb98939e24 | ||
|
|
0fc14173f2 | ||
|
|
2e4f0f0bce | ||
|
|
5f383966a9 | ||
|
|
3942722eba | ||
|
|
1a3de55e68 | ||
|
|
1f46dacf12 | ||
|
|
35dd631e55 | ||
|
|
f79ae654f3 | ||
|
|
e42db579a0 | ||
|
|
7d2905952d | ||
|
|
edf48f6f11 | ||
|
|
41f7ecafcb | ||
|
|
f46c7ec65d | ||
|
|
1b918a5a74 | ||
|
|
01f1e9188a | ||
|
|
ba26e6621b | ||
|
|
2b23951e4f | ||
|
|
a6f5678088 | ||
|
|
23c59f2874 | ||
|
|
6a68cd9b89 | ||
|
|
14b90444c9 | ||
|
|
5b462cfb7a | ||
|
|
7efd2c6251 | ||
|
|
90873ef956 | ||
|
|
8a23525cac | ||
|
|
929cc75675 | ||
|
|
fdb93bb9e6 | ||
|
|
d1adcde15c | ||
|
|
39a137c132 | ||
|
|
53d3e4c112 | ||
|
|
4b1da95835 | ||
|
|
88c8f13c35 | ||
|
|
900d31f6fd | ||
|
|
d005e7c685 | ||
|
|
b3c6f0f48a | ||
|
|
bfead635e4 | ||
|
|
f448e8ea67 | ||
|
|
1de70064e7 | ||
|
|
5929bf57cc | ||
|
|
ba14c0938f | ||
|
|
3acab71fce | ||
|
|
0d314ca0ca | ||
|
|
36bc405a69 | ||
|
|
b94db184f4 | ||
|
|
7055903677 | ||
|
|
e72768b86b | ||
|
|
a915fc0836 | ||
|
|
f473f3605e | ||
|
|
b96587c25f | ||
|
|
6f07a36923 | ||
|
|
c8636b8982 | ||
|
|
ee92f403ef | ||
|
|
e9c8d12c0f | ||
|
|
5a196125dc | ||
|
|
3a21dea2cd | ||
|
|
448fa9e7a6 | ||
|
|
6c2ab5001c | ||
|
|
ff3e3bccc6 | ||
|
|
618972b82b | ||
|
|
d910ed8b9f | ||
|
|
40f9dff5d6 | ||
|
|
cc1966d6a9 | ||
|
|
e6b2cff356 | ||
|
|
5264be76c7 | ||
|
|
59ef5fd27b | ||
|
|
2390308883 | ||
|
|
c077c5bed5 | ||
|
|
9474aa4329 | ||
|
|
7b3ebf9241 | ||
|
|
231ab1037d | ||
|
|
4cc422d628 | ||
|
|
b0e81ea4e9 | ||
|
|
9b59d3dac4 | ||
|
|
e12eb4556d | ||
|
|
d1eb7ba007 | ||
|
|
27cb599e22 | ||
|
|
a54c10bffb | ||
|
|
a28a0788c3 | ||
|
|
826c2fc067 | ||
|
|
3e27d37012 | ||
|
|
097f49d9e6 | ||
|
|
e2805da076 | ||
|
|
0783d35793 | ||
|
|
9337cdc99e | ||
|
|
a216bc2d35 | ||
|
|
1035aed81a | ||
|
|
a389443c9a | ||
|
|
c340980b80 | ||
|
|
19cfe9b15c | ||
|
|
239bbf542f | ||
|
|
d018dc0be6 | ||
|
|
1c4d191193 | ||
|
|
bff5da3547 | ||
|
|
08564ec7b6 | ||
|
|
c9ce16a633 | ||
|
|
684e9e04ad | ||
|
|
9a3727759c | ||
|
|
4a2def5223 | ||
|
|
1563e56223 | ||
|
|
c209b012b1 | ||
|
|
50b48a6435 | ||
|
|
e3742f0c80 | ||
|
|
3959210051 | ||
|
|
84834ff370 | ||
|
|
53e5080d9a | ||
|
|
61d268764d | ||
|
|
2bd83b3f22 | ||
|
|
70f439d9a9 | ||
|
|
39709aa665 | ||
|
|
bd22bf42ee | ||
|
|
227cdb35ae | ||
|
|
2fd4d8b406 | ||
|
|
888bd663c6 | ||
|
|
8d58297328 | ||
|
|
5c5cd163a1 | ||
|
|
2d76190091 | ||
|
|
ecb71f2550 | ||
|
|
7875a76bba | ||
|
|
a5e08cf597 | ||
|
|
62bc78d937 | ||
|
|
72189c307f | ||
|
|
dc8b6cfaab | ||
|
|
bb7b4196f2 | ||
|
|
cd38a62aa8 | ||
|
|
ec0de4afa8 | ||
|
|
8e021a46ee | ||
|
|
b6ddd491b3 | ||
|
|
882a32613d | ||
|
|
74efdfaf97 | ||
|
|
2409cc7a32 | ||
|
|
60fdac0680 | ||
|
|
2501fb1ad5 | ||
|
|
93583e645f | ||
|
|
bf8068b65e | ||
|
|
d95c65b032 | ||
|
|
9f2350bbc9 | ||
|
|
cbe24d6c8f | ||
|
|
d15fe8d08e | ||
|
|
fa19095e26 | ||
|
|
31126c6552 | ||
|
|
726b1542c6 | ||
|
|
48a266c348 | ||
|
|
14afd92ba2 | ||
|
|
7ba0f15f80 | ||
|
|
8107831a2b | ||
|
|
0e26ea95ef | ||
|
|
3e6affa73d | ||
|
|
1165342dcf | ||
|
|
40b127591f | ||
|
|
47e969cdf3 | ||
|
|
77aca35fce | ||
|
|
7338454322 | ||
|
|
f1112297ca | ||
|
|
6c2ee0d6ab | ||
|
|
a830f7dfa6 | ||
|
|
a369a43d94 | ||
|
|
3b55f8c137 | ||
|
|
936a9446a8 | ||
|
|
e4f211db68 | ||
|
|
502d029509 | ||
|
|
dcf08bd8bb | ||
|
|
cf9145a783 | ||
|
|
bfff5762e0 | ||
|
|
c035c3859c | ||
|
|
5aeb4f9b09 | ||
|
|
078369b8c5 | ||
|
|
e96cc36d56 | ||
|
|
682add5eae | ||
|
|
3a21a634f4 | ||
|
|
7be85679c5 | ||
|
|
7ca912e452 | ||
|
|
29f347c19c | ||
|
|
c0334e071a | ||
|
|
f8ce052630 | ||
|
|
9e549b0319 | ||
|
|
61a7846c76 | ||
|
|
777db9a5df | ||
|
|
a25e5f80a5 | ||
|
|
ce18bc392e | ||
|
|
79213f0d63 | ||
|
|
efece12cfe | ||
|
|
fe932c7b22 | ||
|
|
3f29a46f3a | ||
|
|
34690e4b51 | ||
|
|
532c32c884 | ||
|
|
0e9e6d511a | ||
|
|
30c0b2db00 | ||
|
|
5e5c9925f4 | ||
|
|
0a4ad63591 | ||
|
|
bb1145dced | ||
|
|
4021821b91 | ||
|
|
cfd1b1fea2 | ||
|
|
09c444753a | ||
|
|
bc8ccc959d | ||
|
|
01f36e1beb | ||
|
|
dc316e26f5 | ||
|
|
4a6efeba3d | ||
|
|
1b1982e9af | ||
|
|
2784d82a9e | ||
|
|
3def47c331 | ||
|
|
9b4f2d8601 | ||
|
|
1324d45491 | ||
|
|
1c8662ea63 | ||
|
|
c4f5f370ac | ||
|
|
fdd86924e5 | ||
|
|
cfcac262d1 | ||
|
|
a90cf9c7d2 | ||
|
|
28c0b58c2e | ||
|
|
ad84603bee | ||
|
|
2bc1b3e479 | ||
|
|
3fd25ffbcb | ||
|
|
eba9a7f8ef | ||
|
|
5183e7633b | ||
|
|
31491d1e2c | ||
|
|
b1059cafc4 | ||
|
|
7feaeb1483 | ||
|
|
eeff908b0c | ||
|
|
b6e4092ea7 | ||
|
|
c29f318128 | ||
|
|
aad479a19a | ||
|
|
267de619ba | ||
|
|
fdc614126a | ||
|
|
37f390d241 | ||
|
|
7d2cb84e70 | ||
|
|
c298e32942 | ||
|
|
38b2c8f012 | ||
|
|
e6eaec30ba | ||
|
|
10dca5503a | ||
|
|
ef4abec6cf | ||
|
|
f83ed07fcf | ||
|
|
968471f602 | ||
|
|
c4616ff186 | ||
|
|
7ccb98006a | ||
|
|
9d341950ff | ||
|
|
59a9c6b877 | ||
|
|
f4fa1f299c | ||
|
|
2e1838d890 | ||
|
|
dfbeed9daf | ||
|
|
21c056f51d | ||
|
|
cabb55290d | ||
|
|
a61c784b8c | ||
|
|
73690f8f83 | ||
|
|
44e63ce2e7 | ||
|
|
e380805990 | ||
|
|
5064b4d651 | ||
|
|
0fd7dec7b0 | ||
|
|
0b49021a6c | ||
|
|
399ccdeb47 | ||
|
|
832b91b033 | ||
|
|
a87e90c3c2 | ||
|
|
9c9d466752 | ||
|
|
8a55c05e7d | ||
|
|
10e560a9a6 | ||
|
|
ae659d5186 | ||
|
|
7fcae11d4d | ||
|
|
73562d46c5 | ||
|
|
fcc871737a | ||
|
|
5243e2daf4 | ||
|
|
dabe97f9c2 | ||
|
|
00bd015d79 | ||
|
|
53e3bcbd2b | ||
|
|
260f138d3f | ||
|
|
76d242dafc | ||
|
|
57164c8ca6 | ||
|
|
46f0334111 | ||
|
|
3860ba217b | ||
|
|
f71b09c5f7 | ||
|
|
9f7bc01f11 | ||
|
|
4d188cb9a2 | ||
|
|
791612dc4d | ||
|
|
d942a0ebc6 | ||
|
|
f18bd86ab8 | ||
|
|
d7d782be1c | ||
|
|
6fa56c1b38 | ||
|
|
b7ccf78b90 | ||
|
|
c68a229e09 | ||
|
|
79733f7d64 | ||
|
|
eed949d48d | ||
|
|
c84e70b627 | ||
|
|
cc15863fcc | ||
|
|
f078cb6bc7 | ||
|
|
a7b7841ac0 | ||
|
|
a35f998168 | ||
|
|
5db9c2aea1 | ||
|
|
fe21608e98 | ||
|
|
1cc0e5a50f | ||
|
|
82ec5b9fa8 | ||
|
|
93a97381bd | ||
|
|
88cd199d94 | ||
|
|
101731ed13 | ||
|
|
24db8a8866 | ||
|
|
06cab8cc30 | ||
|
|
adaf8a6098 | ||
|
|
783993dbf5 | ||
|
|
51e52f85c1 | ||
|
|
b7d59bee5b | ||
|
|
26c77cfc14 | ||
|
|
e5cc7703d7 | ||
|
|
21f2e62793 | ||
|
|
e5dccc1719 | ||
|
|
3d8751b495 | ||
|
|
a0bf6deebb | ||
|
|
aef88fb773 | ||
|
|
9d07027553 | ||
|
|
2828bfe6a8 | ||
|
|
06a5f5215f | ||
|
|
6de0a8ec00 | ||
|
|
8209a7efe3 | ||
|
|
fd984e754c | ||
|
|
757bdff7ed | ||
|
|
188265c594 | ||
|
|
d571c080b5 | ||
|
|
f416a7a121 | ||
|
|
4ea817568f | ||
|
|
46de3510cd | ||
|
|
0fe0d11e3b | ||
|
|
95b314de73 | ||
|
|
2d1060709b | ||
|
|
3610121dab | ||
|
|
bba85675a9 | ||
|
|
ae14721c5f | ||
|
|
eb3310060f | ||
|
|
2a5c9a242f | ||
|
|
91b131a080 | ||
|
|
ece21a668d | ||
|
|
9cc79b0fab | ||
|
|
796d043c0d | ||
|
|
219ae8a6db | ||
|
|
67ce0af650 | ||
|
|
91ad9df126 | ||
|
|
ceecccc3ae | ||
|
|
43fec63281 | ||
|
|
290c7a7d70 | ||
|
|
4b6456d0b9 | ||
|
|
f6e34e520e | ||
|
|
ba243df042 | ||
|
|
8ddec92724 | ||
|
|
c079e0d23a | ||
|
|
027c0c2eba | ||
|
|
3dc21365f5 | ||
|
|
c432928bd2 | ||
|
|
2738177fff | ||
|
|
1f341f8b4c | ||
|
|
18211ebe2e | ||
|
|
00217dd86e | ||
|
|
124296ee19 | ||
|
|
731bfccfb5 | ||
|
|
5c6f057840 | ||
|
|
4bb7d6b7c2 | ||
|
|
bb7119c642 | ||
|
|
7698bfd980 | ||
|
|
1739ee74d6 | ||
|
|
d75cc8fb72 | ||
|
|
97fefbaffb | ||
|
|
a71bc36860 | ||
|
|
44f64cf3d3 | ||
|
|
bb8c2e66e7 | ||
|
|
d8058362ec | ||
|
|
ba4d06e09e | ||
|
|
daba2128fc | ||
|
|
6568a962bc | ||
|
|
76a2530579 | ||
|
|
8daeeea1d9 | ||
|
|
ddd8a639b7 | ||
|
|
33cd126197 | ||
|
|
42ca499730 | ||
|
|
e079026ac8 | ||
|
|
3ad39f3741 | ||
|
|
4a751f0564 | ||
|
|
372f69853c | ||
|
|
ca11083385 | ||
|
|
b24c3dee40 | ||
|
|
2b209ba9b2 | ||
|
|
2a25bfdc7a | ||
|
|
05756ad567 | ||
|
|
6d5ae5eb76 | ||
|
|
d297af2ce8 | ||
|
|
34ba5348c8 | ||
|
|
5790b817f9 | ||
|
|
b8877be907 | ||
|
|
7a0730f5d2 | ||
|
|
fb7b9f94df | ||
|
|
690eb3f593 | ||
|
|
fa90c77c96 | ||
|
|
b10a0f536f | ||
|
|
6c0a56a26f | ||
|
|
cc8cc99213 | ||
|
|
c4be23a4c5 | ||
|
|
f392bf3ee1 | ||
|
|
02705d2c7f | ||
|
|
a0252cb0f2 | ||
|
|
c89686a8ab | ||
|
|
e45afea1d6 | ||
|
|
b56a70afc3 | ||
|
|
c6dfb73040 | ||
|
|
e7fb90fc67 | ||
|
|
20296b2567 | ||
|
|
ca7ae5c142 | ||
|
|
c050d775d5 | ||
|
|
6f2d258354 | ||
|
|
57be9e2905 | ||
|
|
fec2f9a1c0 | ||
|
|
01f9a1f552 | ||
|
|
04b7032846 | ||
|
|
02a94d79e8 | ||
|
|
4fdebc82d5 | ||
|
|
c306d3fe3b | ||
|
|
a4480b9523 | ||
|
|
9e2eb51ef4 | ||
|
|
cd2ade76c6 | ||
|
|
6621d70d99 | ||
|
|
1313c268dd | ||
|
|
6b05cc9f72 | ||
|
|
abeb59fe30 | ||
|
|
8a71f8860c | ||
|
|
db920f8dab | ||
|
|
96d0ffffa2 | ||
|
|
55405140f7 | ||
|
|
70f7adecda | ||
|
|
2533cb196d | ||
|
|
4654bb93c4 | ||
|
|
66bdf71f97 | ||
|
|
ce53b81dc5 | ||
|
|
2a9b34ce2d | ||
|
|
fa0aa6b53b | ||
|
|
4859bc8810 | ||
|
|
b2b43689f0 | ||
|
|
62f1293b89 | ||
|
|
614134b59b | ||
|
|
c96bf1636a | ||
|
|
eb10ca8a4b | ||
|
|
362554d2ac | ||
|
|
ce6785124c |
43 changed files with 7764 additions and 2235 deletions
11
.gitignore
vendored
Normal file
11
.gitignore
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
# SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
*.swp
|
||||
*~
|
||||
ncurses
|
||||
zstd
|
||||
static-*/
|
||||
zig-cache/
|
||||
zig-out/
|
||||
.zig-cache/
|
||||
1
AUTHORS
1
AUTHORS
|
|
@ -1 +0,0 @@
|
|||
ncdu is written by Yoran Heling <projects@yorhel.nl>
|
||||
20
COPYING
20
COPYING
|
|
@ -1,20 +0,0 @@
|
|||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
178
ChangeLog
178
ChangeLog
|
|
@ -1,40 +1,148 @@
|
|||
1.2 - 2007-07-24
|
||||
- Fixed some bugs on cygwin
|
||||
- Added du-like exclude patterns
|
||||
- Fixed bug #1758403: large directories work fine now
|
||||
- Rewrote a large part of the code
|
||||
- Fixed a bug with wide characters
|
||||
- Performance improvements when browsing large dirs
|
||||
# SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
1.1 - 2007-04-30
|
||||
- Deleting files and directories is now possible from
|
||||
within ncdu.
|
||||
- The key for sorting directories between files has
|
||||
changed to 't' instead of 'd'. The 'd'-key is now
|
||||
used for deleting files.
|
||||
2.9.2 - 2025-10-24
|
||||
- Still requires Zig 0.14 or 0.15
|
||||
- Fix hang on loading config file when compiled with Zig 0.15.2
|
||||
|
||||
1.0 - 2007-04-06
|
||||
- First stable release
|
||||
- Small code cleanup
|
||||
- Added a key to toggle between sorting dirs before
|
||||
files and dirs between files
|
||||
- Added graphs and percentages to the directory
|
||||
browser (can be enabled or disabled with the 'g'-key)
|
||||
2.9.1 - 2025-08-21
|
||||
- Add support for building with Zig 0.15
|
||||
- Zig 0.14 is still supported
|
||||
|
||||
0.3 - 2007-03-04
|
||||
- When browsing back to the previous directory, the
|
||||
directory you're getting back from will be selected.
|
||||
- Added directory scanning in quiet mode to save
|
||||
bandwidth on remote connections.
|
||||
2.9 - 2025-08-16
|
||||
- Still requires Zig 0.14
|
||||
- Add --delete-command option to replace the built-in file deletion
|
||||
- Move term cursor to selected option in delete confirmation window
|
||||
- Support binary import on older Linux kernels lacking statx() (may break
|
||||
again in the future, Zig does not officially support such old kernels)
|
||||
|
||||
0.2 - 2007-02-26
|
||||
- Fixed POSIX compliance: replaced realpath() with my
|
||||
own implementation, and gettimeofday() is not
|
||||
required anymore (but highly recommended)
|
||||
- Added a warning for terminals smaller than 60x16
|
||||
- Mountpoints (or any other directory pointing to
|
||||
another filesystem) are now considered to be
|
||||
directories rather than files.
|
||||
2.8.2 - 2025-05-01
|
||||
- Still requires Zig 0.14
|
||||
- Fix a build error on MacOS
|
||||
|
||||
0.1 - 2007-02-21
|
||||
- Initial version
|
||||
2.8.1 - 2025-04-28
|
||||
- Still requires Zig 0.14
|
||||
- Fix integer overflow in binary export
|
||||
- Fix crash when `fstatat()` returns EINVAL
|
||||
- Minor build system improvements
|
||||
|
||||
2.8 - 2025-03-05
|
||||
- Now requires Zig 0.14
|
||||
- Add support for @-prefixed lines to ignore errors in config file
|
||||
- List all supported options in `--help`
|
||||
- Use `kB` instead of `KB` in `--si` mode
|
||||
|
||||
2.7 - 2024-11-19
|
||||
- Still requires Zig 0.12 or 0.13
|
||||
- Support transparent reading/writing of zstandard-compressed JSON
|
||||
- Add `--compress` and `--export-block-size` options
|
||||
- Perform tilde expansion on paths in the config file
|
||||
- Fix JSON import of escaped UTF-16 surrogate pairs
|
||||
- Fix incorrect field in root item when exporting to the binary format
|
||||
- Add -Dstrip build flag
|
||||
|
||||
2.6 - 2024-09-27
|
||||
- Still requires Zig 0.12 or 0.13
|
||||
- Add dependency on libzstd
|
||||
- Add new export format to support threaded export and low-memory browsing
|
||||
- Add `-O` and `--compress-level` CLI flags
|
||||
- Add progress indicator to hardlink counting stage
|
||||
- Fix displaying and exporting zero values when extended info is not available
|
||||
- Fix clearing screen in some error cases
|
||||
- Fix uncommon edge case in hardlink counting on refresh
|
||||
- Use integer math instead of floating point to format numbers
|
||||
|
||||
2.5 - 2024-07-24
|
||||
- Still requires Zig 0.12 or 0.13
|
||||
- Add parallel scanning with `-t,--threads` CLI flags
|
||||
- Improve JSON export and import performance
|
||||
- `--exclude-kernfs` is no longer checked on the top-level scan path
|
||||
- Fix entries sometimes not showing up after refresh
|
||||
- Fix file descriptor leak with `--exclude-caches` checking
|
||||
- Fix possible crash on invalid UTF8 when scanning in `-1` UI mode
|
||||
- Fix JSON export and import of the "other filesystem" flag
|
||||
- Fix JSON import containing directories with a read error
|
||||
- Fix mtime display of 'special' files
|
||||
- Fix edge case bad performance when deleting hardlinks with many links
|
||||
- Increased memory use for hardlinks (by ~10% in extreme cases, sorry)
|
||||
|
||||
2.4 - 2024-04-21
|
||||
- Now requires Zig 0.12
|
||||
- Revert default color scheme back to 'off'
|
||||
- Rewrite man page in mdoc, drop pod2man dependency
|
||||
- Fix updating parent dir error status on refresh
|
||||
|
||||
2.3 - 2023-08-04
|
||||
- Now requires Zig 0.11
|
||||
- Add --(enable|disable)-natsort options
|
||||
- Add indicator to apparent size/disk usage selection in the footer
|
||||
- Fix build on armv7l (hopefully)
|
||||
- Minor build system additions
|
||||
|
||||
2.2.2 - 2023-01-19
|
||||
- Now requires Zig 0.10 or 0.10.1
|
||||
- That's it, pretty much.
|
||||
|
||||
2.2.1 - 2022-10-25
|
||||
- Still requires Zig 0.9.0 or 0.9.1
|
||||
- Fix bug with 'dark' and 'off' color themes on FreeBSD and MacOS
|
||||
|
||||
2.2 - 2022-10-17
|
||||
- Still requires Zig 0.9.0 or 0.9.1
|
||||
- (breaking) Wildcards in exclude patterns don't cross directory boundary anymore
|
||||
- Improve exclude pattern matching performance
|
||||
- Set full background in default dark-bg color scheme
|
||||
- Fix broken JSON export when a filename contains control characters below 0x10
|
||||
|
||||
2.1.2 - 2022-04-28
|
||||
- Still requires Zig 0.9.0 or 0.9.1
|
||||
- Fix possible crash on shortening file names with unicode variation
|
||||
selectors or combining marks
|
||||
|
||||
2.1.1 - 2022-03-25
|
||||
- Still requires Zig 0.9.0 or 0.9.1
|
||||
- Fix potential crash when refreshing
|
||||
- Fix typo in --graph-style=eighth-block
|
||||
- Revert default --graph-style to hash characters
|
||||
|
||||
2.1 - 2022-02-07
|
||||
- Still requires Zig 0.9.0
|
||||
- Use natural sort order when sorting by file name
|
||||
- Use Unicode box drawing characters for the file size bar
|
||||
- Add --graph-style option to change drawing style for the file size bar
|
||||
- Fix early exit if a configuration directory does not exist
|
||||
- Fix display glitch for long file names
|
||||
- Fix display glitch with drawing unique/shared size column
|
||||
|
||||
2.0.1 - 2022-01-01
|
||||
- Still requires Zig 0.9.0
|
||||
- Fix build failure to find 'wcwidth' on some systems
|
||||
- Add ZIG_FLAGS option to Makefile
|
||||
|
||||
2.0 - 2021-12-21
|
||||
- Requires Zig 0.9.0
|
||||
- That's the only change.
|
||||
|
||||
2.0-beta3 - 2021-11-09
|
||||
- Requires Zig 0.8 or 0.8.1
|
||||
- Add lots of new CLI flags to configure ncdu
|
||||
- Add configuration file support
|
||||
- Add 'dark-bg' color scheme and use that by default
|
||||
- Fix not enabling -x by default
|
||||
- Fix export feature
|
||||
- Fix import of "special" dirs and files
|
||||
- Fix double-slash display in file browser
|
||||
|
||||
2.0-beta2 - 2021-07-31
|
||||
- Requires Zig 0.8
|
||||
- Significantly reduce memory usage for hard links
|
||||
- Slightly increase memory usage for directory entries
|
||||
- Fix reporting of fatal errors in the -0 and -1 scanning UIs
|
||||
|
||||
2.0-beta1 - 2021-07-22
|
||||
- Full release announcement: https://dev.yorhel.nl/doc/ncdu2
|
||||
- Requires Zig 0.8
|
||||
- Features and UI based on ncdu 1.16
|
||||
- Lower memory use in most scenarios (except with many hard links)
|
||||
- Improved performance of hard link counting
|
||||
- Extra column for shared/unique directory sizes
|
||||
|
|
|
|||
5
INSTALL
5
INSTALL
|
|
@ -1,5 +0,0 @@
|
|||
The usual:
|
||||
|
||||
./configure --prefix=/usr
|
||||
make
|
||||
make install
|
||||
9
LICENSES/MIT.txt
Normal file
9
LICENSES/MIT.txt
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) <year> <copyright holders>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
114
Makefile
Normal file
114
Makefile
Normal file
|
|
@ -0,0 +1,114 @@
|
|||
# SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# Optional semi-standard Makefile with some handy tools.
|
||||
# Ncdu itself can be built with just the zig build system.
|
||||
|
||||
ZIG ?= zig
|
||||
|
||||
PREFIX ?= /usr/local
|
||||
BINDIR ?= ${PREFIX}/bin
|
||||
MANDIR ?= ${PREFIX}/share/man/man1
|
||||
ZIG_FLAGS ?= --release=fast -Dstrip
|
||||
|
||||
NCDU_VERSION=$(shell grep 'program_version = "' src/main.zig | sed -e 's/^.*"\(.\+\)".*$$/\1/')
|
||||
|
||||
.PHONY: build test
|
||||
build: release
|
||||
|
||||
release:
|
||||
$(ZIG) build ${ZIG_FLAGS}
|
||||
|
||||
debug:
|
||||
$(ZIG) build
|
||||
|
||||
clean:
|
||||
rm -rf zig-cache zig-out
|
||||
|
||||
install: install-bin install-doc
|
||||
|
||||
install-bin: release
|
||||
mkdir -p ${BINDIR}
|
||||
install -m0755 zig-out/bin/ncdu ${BINDIR}/
|
||||
|
||||
install-doc:
|
||||
mkdir -p ${MANDIR}
|
||||
install -m0644 ncdu.1 ${MANDIR}/
|
||||
|
||||
uninstall: uninstall-bin uninstall-doc
|
||||
|
||||
# XXX: Ideally, these would also remove the directories created by 'install' if they are empty.
|
||||
uninstall-bin:
|
||||
rm -f ${BINDIR}/ncdu
|
||||
|
||||
uninstall-doc:
|
||||
rm -f ${MANDIR}/ncdu.1
|
||||
|
||||
dist:
|
||||
rm -f ncdu-${NCDU_VERSION}.tar.gz
|
||||
mkdir ncdu-${NCDU_VERSION}
|
||||
for f in `git ls-files | grep -v ^\.gitignore`; do mkdir -p ncdu-${NCDU_VERSION}/`dirname $$f`; ln -s "`pwd`/$$f" ncdu-${NCDU_VERSION}/$$f; done
|
||||
tar -cophzf ncdu-${NCDU_VERSION}.tar.gz --sort=name ncdu-${NCDU_VERSION}
|
||||
rm -rf ncdu-${NCDU_VERSION}
|
||||
|
||||
|
||||
# ASSUMPTION:
|
||||
# - the ncurses source tree has been extracted into ncurses/
|
||||
# - the zstd source tree has been extracted into zstd/
|
||||
# Would be nicer to do all this with the Zig build system, but no way am I
|
||||
# going to write build.zig's for these projects.
|
||||
static-%.tar.gz:
|
||||
mkdir -p static-$*/nc static-$*/inst/pkg
|
||||
cp -R zstd/lib static-$*/zstd
|
||||
make -C static-$*/zstd -j8 libzstd.a V=1\
|
||||
ZSTD_LIB_DICTBUILDER=0\
|
||||
ZSTD_LIB_MINIFY=1\
|
||||
ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP=1\
|
||||
CC="${ZIG} cc --target=$*"\
|
||||
LD="${ZIG} cc --target=$*"\
|
||||
AR="${ZIG} ar" RANLIB="${ZIG} ranlib"
|
||||
cd static-$*/nc && ../../ncurses/configure --prefix="`pwd`/../inst"\
|
||||
--without-cxx --without-cxx-binding --without-ada --without-manpages --without-progs\
|
||||
--without-tests --disable-pc-files --without-pkg-config --without-shared --without-debug\
|
||||
--without-gpm --without-sysmouse --enable-widec --with-default-terminfo-dir=/usr/share/terminfo\
|
||||
--with-terminfo-dirs=/usr/share/terminfo:/lib/terminfo:/usr/local/share/terminfo\
|
||||
--with-fallbacks="screen linux vt100 xterm xterm-256color" --host=$*\
|
||||
CC="${ZIG} cc --target=$*"\
|
||||
LD="${ZIG} cc --target=$*"\
|
||||
AR="${ZIG} ar" RANLIB="${ZIG} ranlib"\
|
||||
CPPFLAGS=-D_GNU_SOURCE && make -j8
|
||||
@# zig-build - cleaner approach but doesn't work, results in a dynamically linked binary.
|
||||
@#cd static-$* && PKG_CONFIG_LIBDIR="`pwd`/inst/pkg" zig build -Dtarget=$*
|
||||
@# --build-file ../build.zig --search-prefix inst/ --cache-dir zig -Drelease-fast=true
|
||||
@# Alternative approach, bypassing zig-build
|
||||
cd static-$* && ${ZIG} build-exe -target $*\
|
||||
-Inc/include -Izstd -lc nc/lib/libncursesw.a zstd/libzstd.a\
|
||||
--cache-dir zig-cache -static -fstrip -O ReleaseFast ../src/main.zig
|
||||
@# My system's strip can't deal with arm binaries and zig doesn't wrap a strip alternative.
|
||||
@# Whatever, just let it error for those.
|
||||
strip -R .eh_frame -R .eh_frame_hdr static-$*/main || true
|
||||
cd static-$* && mv main ncdu && tar -czf ../static-$*.tar.gz ncdu
|
||||
rm -rf static-$*
|
||||
|
||||
static-linux-x86_64: static-x86_64-linux-musl.tar.gz
|
||||
mv $< ncdu-${NCDU_VERSION}-linux-x86_64.tar.gz
|
||||
|
||||
static-linux-x86: static-x86-linux-musl.tar.gz
|
||||
mv $< ncdu-${NCDU_VERSION}-linux-x86.tar.gz
|
||||
|
||||
static-linux-aarch64: static-aarch64-linux-musl.tar.gz
|
||||
mv $< ncdu-${NCDU_VERSION}-linux-aarch64.tar.gz
|
||||
|
||||
static-linux-arm: static-arm-linux-musleabi.tar.gz
|
||||
mv $< ncdu-${NCDU_VERSION}-linux-arm.tar.gz
|
||||
|
||||
static:\
|
||||
static-linux-x86_64 \
|
||||
static-linux-x86 \
|
||||
static-linux-aarch64 \
|
||||
static-linux-arm
|
||||
|
||||
test:
|
||||
zig build test
|
||||
mandoc -T lint ncdu.1
|
||||
reuse lint
|
||||
|
|
@ -1 +0,0 @@
|
|||
SUBDIRS = src doc
|
||||
40
NEWS
40
NEWS
|
|
@ -1,40 +0,0 @@
|
|||
1.2 - 2007-07-24
|
||||
- Fixed some bugs on cygwin
|
||||
- Added du-like exclude patterns
|
||||
- Fixed bug #1758403: large directories work fine now
|
||||
- Rewrote a large part of the code
|
||||
- Fixed a bug with wide characters
|
||||
- Performance improvements when browsing large dirs
|
||||
|
||||
1.1 - 2007-04-30
|
||||
- Deleting files and directories is now possible from
|
||||
within ncdu.
|
||||
- The key for sorting directories between files has
|
||||
changed to 't' instead of 'd'. The 'd'-key is now
|
||||
used for deleting files.
|
||||
|
||||
1.0 - 2007-04-06
|
||||
- First stable release
|
||||
- Small code cleanup
|
||||
- Added a key to toggle between sorting dirs before
|
||||
files and dirs between files
|
||||
- Added graphs and percentages to the directory
|
||||
browser (can be enabled or disabled with the 'g'-key)
|
||||
|
||||
0.3 - 2007-03-04
|
||||
- When browsing back to the previous directory, the
|
||||
directory you're getting back from will be selected.
|
||||
- Added directory scanning in quiet mode to save
|
||||
bandwidth on remote connections.
|
||||
|
||||
0.2 - 2007-02-26
|
||||
- Fixed POSIX compliance: replaced realpath() with my
|
||||
own implementation, and gettimeofday() is not
|
||||
required anymore (but highly recommended)
|
||||
- Added a warning for terminals smaller than 60x16
|
||||
- Mountpoints (or any other directory pointing to
|
||||
another filesystem) are now considered to be
|
||||
directories rather than files.
|
||||
|
||||
0.1 - 2007-02-21
|
||||
- Initial version
|
||||
49
README
49
README
|
|
@ -1,49 +0,0 @@
|
|||
ncdu 1.2
|
||||
========
|
||||
|
||||
DESCRIPTION
|
||||
|
||||
ncdu (NCurses Disk Usage) is a curses-based version of
|
||||
the well-known 'du', and provides a fast way to see what
|
||||
directories are using your disk space.
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
In order to compile and install ncdu, you need to have
|
||||
at least...
|
||||
|
||||
- a POSIX-compliant operating system (Linux, BSD, etc)
|
||||
- curses libraries and header files
|
||||
|
||||
|
||||
INSTALL
|
||||
|
||||
The usual:
|
||||
|
||||
./configure
|
||||
make
|
||||
make install
|
||||
|
||||
COPYING
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
36
README.md
Normal file
36
README.md
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
<!--
|
||||
SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
SPDX-License-Identifier: MIT
|
||||
-->
|
||||
|
||||
# ncdu-zig
|
||||
|
||||
## Description
|
||||
|
||||
Ncdu is a disk usage analyzer with an ncurses interface. It is designed to find
|
||||
space hogs on a remote server where you don't have an entire graphical setup
|
||||
available, but it is a useful tool even on regular desktop systems. Ncdu aims
|
||||
to be fast, simple and easy to use, and should be able to run in any minimal
|
||||
POSIX-like environment with ncurses installed.
|
||||
|
||||
See the [ncdu 2 release announcement](https://dev.yorhel.nl/doc/ncdu2) for
|
||||
information about the differences between this Zig implementation (2.x) and the
|
||||
C version (1.x).
|
||||
|
||||
## Requirements
|
||||
|
||||
- Zig 0.14 or 0.15
|
||||
- Some sort of POSIX-like OS
|
||||
- ncurses
|
||||
- libzstd
|
||||
|
||||
## Install
|
||||
|
||||
You can use the Zig build system if you're familiar with that.
|
||||
|
||||
There's also a handy Makefile that supports the typical targets, e.g.:
|
||||
|
||||
```
|
||||
make
|
||||
sudo make install PREFIX=/usr
|
||||
```
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
# autogen.sh - creates configure scripts and more
|
||||
# requires autoconf
|
||||
|
||||
aclocal
|
||||
autoheader
|
||||
automake -a
|
||||
autoconf
|
||||
53
build.zig
Normal file
53
build.zig
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *std.Build) void {
|
||||
const target = b.standardTargetOptions(.{});
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
|
||||
const pie = b.option(bool, "pie", "Build with PIE support (by default: target-dependant)");
|
||||
const strip = b.option(bool, "strip", "Strip debugging info (by default false)") orelse false;
|
||||
|
||||
const main_mod = b.createModule(.{
|
||||
.root_source_file = b.path("src/main.zig"),
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
.strip = strip,
|
||||
.link_libc = true,
|
||||
});
|
||||
main_mod.linkSystemLibrary("ncursesw", .{});
|
||||
main_mod.linkSystemLibrary("zstd", .{});
|
||||
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "ncdu",
|
||||
.root_module = main_mod,
|
||||
});
|
||||
exe.pie = pie;
|
||||
// https://github.com/ziglang/zig/blob/faccd79ca5debbe22fe168193b8de54393257604/build.zig#L745-L748
|
||||
if (target.result.os.tag.isDarwin()) {
|
||||
// useful for package maintainers
|
||||
exe.headerpad_max_install_names = true;
|
||||
}
|
||||
b.installArtifact(exe);
|
||||
|
||||
const run_cmd = b.addRunArtifact(exe);
|
||||
run_cmd.step.dependOn(b.getInstallStep());
|
||||
if (b.args) |args| {
|
||||
run_cmd.addArgs(args);
|
||||
}
|
||||
|
||||
const run_step = b.step("run", "Run the app");
|
||||
run_step.dependOn(&run_cmd.step);
|
||||
|
||||
const unit_tests = b.addTest(.{
|
||||
.root_module = main_mod,
|
||||
});
|
||||
unit_tests.pie = pie;
|
||||
|
||||
const run_unit_tests = b.addRunArtifact(unit_tests);
|
||||
|
||||
const test_step = b.step("test", "Run unit tests");
|
||||
test_step.dependOn(&run_unit_tests.step);
|
||||
}
|
||||
38
configure.in
38
configure.in
|
|
@ -1,38 +0,0 @@
|
|||
|
||||
AC_INIT(ncdu, 1.2, projects@yorhel.nl)
|
||||
AC_CONFIG_SRCDIR([src/ncdu.h])
|
||||
AC_CONFIG_HEADER([config.h])
|
||||
AM_INIT_AUTOMAKE
|
||||
|
||||
# Checks for programs.
|
||||
AC_PROG_CC
|
||||
AC_PROG_INSTALL
|
||||
AC_PROG_RANLIB
|
||||
|
||||
# Checks for libraries.
|
||||
AC_CHECK_LIB(ncurses, initscr)
|
||||
AC_CHECK_LIB(form, new_form)
|
||||
|
||||
# Checks for header files.
|
||||
AC_HEADER_DIRENT
|
||||
AC_HEADER_STDC
|
||||
AC_CHECK_HEADERS([limits.h stdlib.h string.h sys/time.h unistd.h fnmatch.h])
|
||||
|
||||
# Checks for typedefs, structures, and compiler characteristics.
|
||||
AC_C_CONST
|
||||
AC_TYPE_OFF_T
|
||||
AC_SYS_LARGEFILE
|
||||
AC_STRUCT_ST_BLOCKS
|
||||
AC_HEADER_TIME
|
||||
|
||||
# Checks for library functions.
|
||||
AC_FUNC_CLOSEDIR_VOID
|
||||
AC_FUNC_LSTAT
|
||||
AC_FUNC_MALLOC
|
||||
AC_FUNC_STAT
|
||||
AC_CHECK_FUNCS([getcwd gettimeofday memset fnmatch])
|
||||
|
||||
AC_OUTPUT([Makefile src/Makefile doc/Makefile])
|
||||
|
||||
echo ""
|
||||
echo "Now type \"make\" and \"make install\" to build and install ncdu"
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
man_MANS = ncdu.1
|
||||
EXTRA_DIST = ncdu.1
|
||||
130
doc/ncdu.1
130
doc/ncdu.1
|
|
@ -1,130 +0,0 @@
|
|||
." Text automatically generated by txt2man
|
||||
.TH NCDU 1 "July 21, 2007" "ncdu-1.2" "ncdu manual"
|
||||
.SH NAME
|
||||
\fBncdu \fP- NCurses Disk Usage
|
||||
.SH SYNOPSIS
|
||||
.nf
|
||||
.fam C
|
||||
\fBncdu\fP [\fB-ahqvx\fP] [\fIdir\fP]
|
||||
.fam T
|
||||
.fi
|
||||
.SH DESCRIPTION
|
||||
\fBncdu\fP (NCurses Disk Usage) is a curses-based version of
|
||||
the well-known 'du', and provides a fast way to see what
|
||||
directories are using your disk space.
|
||||
.PP
|
||||
If \fIdir\fP is specified, \fBncdu\fP will automatically start
|
||||
calculating the disk size. Otherwise, you will be prompted
|
||||
with a small configuration window.
|
||||
.PP
|
||||
For a more detailed explanation of \fBncdu\fP, please check
|
||||
http://dev.yorhel.nl/ncdu/
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB-a\fP
|
||||
Apparent sizes. Normally, \fBncdu\fP would calculate the
|
||||
disk usage of each file, setting \fB-a\fP will tell \fBncdu\fP
|
||||
to simply calculate the actual size of the files.
|
||||
.TP
|
||||
\fB-h\fP
|
||||
Print a small help message
|
||||
.TP
|
||||
\fB-q\fP
|
||||
Quiet mode. While calculating disk space, \fBncdu\fP will
|
||||
update the screen 10 times a second by default, this
|
||||
will be decreased to once every 2 seconds in quiet
|
||||
mode. Use this feature to save bandwidth over remote
|
||||
connections.
|
||||
.TP
|
||||
\fB-v\fP
|
||||
Print version.
|
||||
.TP
|
||||
\fB-x\fP
|
||||
Only count files and directories on the same
|
||||
filesystem as the specified \fIdir\fP.
|
||||
.TP
|
||||
\fB--exclude\fP PATTERN
|
||||
Exclude files that match PATTERN. This argument can
|
||||
be added multiple times to add more patterns.
|
||||
.TP
|
||||
\fB-X\fP FILE, \fB--exclude-from\fP FILE
|
||||
Exclude files that match any pattern in FILE. Patterns
|
||||
should be seperated by a newline.
|
||||
.SH KEYS
|
||||
.SS SETTINGS WINDOW
|
||||
.TP
|
||||
.B
|
||||
up/down/tab
|
||||
Cycle through the form fields.
|
||||
.TP
|
||||
.B
|
||||
Enter
|
||||
Post form
|
||||
.SS CALCULATING WINDOW
|
||||
.TP
|
||||
.B
|
||||
q
|
||||
Quit
|
||||
.SS BROWSER
|
||||
.TP
|
||||
.B
|
||||
?
|
||||
Show help + keys + about screen
|
||||
.TP
|
||||
.B
|
||||
up/down
|
||||
Cycle through the items
|
||||
.TP
|
||||
.B
|
||||
right/enter
|
||||
Open selected directory
|
||||
.TP
|
||||
.B
|
||||
left
|
||||
Go to parent directory
|
||||
.TP
|
||||
.B
|
||||
n
|
||||
Order by filename (press again for descending order)
|
||||
.TP
|
||||
.B
|
||||
s
|
||||
Order by filesize (press again for descending order)
|
||||
.TP
|
||||
.B
|
||||
d
|
||||
Delete the selected file or directory. An error message will be shown
|
||||
when the contents of the directory do not match or do not exist anymore
|
||||
on the filesystem.
|
||||
.TP
|
||||
.B
|
||||
t
|
||||
Toggle dirs before files when sorting.
|
||||
.TP
|
||||
.B
|
||||
g
|
||||
Toggle between showing percentage, graph, both, or none. Percentage
|
||||
is relative to the size of the current directory, graph is relative
|
||||
to the largest item in the current directory.
|
||||
.TP
|
||||
.B
|
||||
p
|
||||
Use powers of 1000 instead of 1024, press again to switch back
|
||||
.TP
|
||||
.B
|
||||
h
|
||||
Show/hide 'hidden' or 'excluded' files and directories. Please note that
|
||||
even though you can't see the hidden files and directories, they are still
|
||||
there and they are still included in the directory sizes. If you suspect
|
||||
that the totals shown at the bottom of the screen are not correct, make
|
||||
sure you haven't enabled this option.
|
||||
.TP
|
||||
.B
|
||||
q
|
||||
Quit
|
||||
.SH AUTHOR
|
||||
Written by Yoran Heling <projects@yorhel.nl>.
|
||||
.SH BUGS
|
||||
Infinite. Please contact the author if you find one.
|
||||
.SH SEE ALSO
|
||||
\fBdu\fP(1)
|
||||
620
ncdu.1
Normal file
620
ncdu.1
Normal file
|
|
@ -0,0 +1,620 @@
|
|||
.\" SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
.\" SPDX-License-Identifier: MIT
|
||||
.Dd August 16, 2025
|
||||
.Dt NCDU 1
|
||||
.Os
|
||||
.Sh NAME
|
||||
.Nm ncdu
|
||||
.Nd NCurses Disk Usage
|
||||
.
|
||||
.Sh SYNOPSIS
|
||||
.Nm
|
||||
.Op Fl f Ar file
|
||||
.Op Fl o Ar file
|
||||
.Op Fl O Ar file
|
||||
.Op Fl e , \-extended , \-no\-extended
|
||||
.Op Fl \-ignore\-config
|
||||
.Op Fl x , \-one\-file\-system , \-cross\-file\-system
|
||||
.Op Fl \-exclude Ar pattern
|
||||
.Op Fl X , \-exclude\-from Ar file
|
||||
.Op Fl \-include\-caches , \-exclude\-caches
|
||||
.Op Fl L , \-follow\-symlinks , \-no\-follow\-symlinks
|
||||
.Op Fl \-include\-kernfs , \-exclude\-kernfs
|
||||
.Op Fl t , \-threads Ar num
|
||||
.Op Fl c , \-compress , \-no\-compress
|
||||
.Op Fl \-compress\-level Ar num
|
||||
.Op Fl \-export\-block\-size Ar num
|
||||
.Op Fl 0 , 1 , 2
|
||||
.Op Fl q , \-slow\-ui\-updates , \-fast\-ui\-updates
|
||||
.Op Fl \-enable\-shell , \-disable\-shell
|
||||
.Op Fl \-enable\-delete , \-disable\-delete
|
||||
.Op Fl \-enable\-refresh , \-disable\-refresh
|
||||
.Op Fl r
|
||||
.Op Fl \-si , \-no\-si
|
||||
.Op Fl \-disk\-usage , \-apparent\-size
|
||||
.Op Fl \-show\-hidden , \-hide\-hidden
|
||||
.Op Fl \-show\-itemcount , \-hide\-itemcount
|
||||
.Op Fl \-show\-mtime , \-hide\-mtime
|
||||
.Op Fl \-show\-graph , \-hide\-graph
|
||||
.Op Fl \-show\-percent , \-hide\-percent
|
||||
.Op Fl \-graph\-style Ar hash | half\-block | eighth\-block
|
||||
.Op Fl \-shared\-column Ar off | shared | unique
|
||||
.Op Fl \-sort Ar column
|
||||
.Op Fl \-enable\-natsort , \-disable\-natsort
|
||||
.Op Fl \-group\-directories\-first , \-no\-group\-directories\-first
|
||||
.Op Fl \-confirm\-quit , \-no\-confirm\-quit
|
||||
.Op Fl \-confirm\-delete , \-no\-confirm\-delete
|
||||
.Op Fl \-delete\-command Ar command
|
||||
.Op Fl \-color Ar off | dark | dark-bg
|
||||
.Op Ar path
|
||||
.Nm
|
||||
.Op Fl h , \-help
|
||||
.Nm
|
||||
.Op Fl v , V , \-version
|
||||
.
|
||||
.Sh DESCRIPTION
|
||||
.Nm
|
||||
(NCurses Disk Usage) is an interactive curses-based version of the well-known
|
||||
.Xr du 1 ,
|
||||
and provides a fast way to see what directories are using your disk space.
|
||||
.
|
||||
.Sh OPTIONS
|
||||
.Ss Mode Selection
|
||||
.Bl -tag -width Ds
|
||||
.It Fl h , \-help
|
||||
Print a short help message and quit.
|
||||
.It Fl v , V , \-version
|
||||
Print version and quit.
|
||||
.It Fl f Ar file
|
||||
Load the given file, which has earlier been created with the
|
||||
.Fl o
|
||||
or
|
||||
.Fl O
|
||||
flag.
|
||||
If
|
||||
.Ar file
|
||||
is equivalent to '\-', the file is read from standard input.
|
||||
Reading from standard input is only supported for the JSON format.
|
||||
.Pp
|
||||
For the sake of preventing a screw-up, the current version of
|
||||
.Nm
|
||||
will assume that the directory information in the imported file does not
|
||||
represent the filesystem on which the file is being imported.
|
||||
That is, the refresh, file deletion and shell spawning options in the browser
|
||||
will be disabled.
|
||||
.It Ar dir
|
||||
Scan the given directory.
|
||||
.It Fl o Ar file
|
||||
Export the directory tree in JSON format to
|
||||
.Ar file
|
||||
instead of opening the browser interface.
|
||||
If
|
||||
.Ar file
|
||||
is '\-', the data is written to standard output.
|
||||
See the examples section below for some handy use cases.
|
||||
.Pp
|
||||
Be warned that the exported data may grow quite large when exporting a
|
||||
directory with many files.
|
||||
10.000 files will get you an export in the order of 600 to 700 KiB
|
||||
uncompressed, or a little over 100 KiB when compressed with gzip.
|
||||
This scales linearly, so be prepared to handle a few tens of megabytes when
|
||||
dealing with millions of files.
|
||||
.Pp
|
||||
Consider enabling
|
||||
.Fl c
|
||||
to output Zstandard-compressed JSON, which can significantly reduce size of the
|
||||
exported data.
|
||||
.Pp
|
||||
When running a multi-threaded scan or when scanning a directory tree that may
|
||||
not fit in memory, consider using
|
||||
.Fl O
|
||||
instead.
|
||||
.It Fl O Ar file
|
||||
Export the directory tree in binary format to
|
||||
.Ar file
|
||||
instead of opening the browser interface.
|
||||
If
|
||||
.Ar file
|
||||
is '\-', the data is written to standard output.
|
||||
The binary format has built-in compression, supports low-memory multi-threaded
|
||||
export (in combination with
|
||||
.Fl t )
|
||||
and can be browsed without importing the entire directory tree into memory.
|
||||
.It Fl e , \-extended , \-no\-extended
|
||||
Enable/disable extended information mode.
|
||||
This will, in addition to the usual file information, also read the ownership,
|
||||
permissions and last modification time for each file.
|
||||
This will result in higher memory usage (by roughly ~30%) and in a larger
|
||||
output file when exporting.
|
||||
.Pp
|
||||
When using the file export/import function, this flag should be added both when
|
||||
exporting (to make sure the information is added to the export) and when
|
||||
importing (to read this extra information in memory).
|
||||
This flag has no effect when importing a file that has been exported without
|
||||
the extended information.
|
||||
.Pp
|
||||
This enables viewing and sorting by the latest child mtime, or modified time,
|
||||
using 'm' and 'M', respectively.
|
||||
.It Fl \-ignore\-config
|
||||
Do not attempt to load any configuration files.
|
||||
.El
|
||||
.
|
||||
.Ss Scan Options
|
||||
These options affect the scanning progress, they have no effect when importing
|
||||
directory information from a file.
|
||||
.Bl -tag -width Ds
|
||||
.It Fl x , \-one\-file\-system
|
||||
Do not cross filesystem boundaries, i.e. only count files and directories on
|
||||
the same filesystem as the directory being scanned.
|
||||
.It Fl \-cross\-file\-system
|
||||
Do cross filesystem boundaries.
|
||||
This is the default, but can be specified to overrule a previously configured
|
||||
.Fl x .
|
||||
.It Fl \-exclude Ar pattern
|
||||
Exclude files that match
|
||||
.Ar pattern .
|
||||
The files are still displayed by default, but are not counted towards the disk
|
||||
usage statistics.
|
||||
This argument can be added multiple times to add more patterns.
|
||||
.It Fl X , \-exclude\-from Ar file
|
||||
Exclude files that match any pattern in
|
||||
.Ar file .
|
||||
Patterns should be separated by a newline.
|
||||
.It Fl \-include\-caches , \-exclude\-caches
|
||||
Include (default) or exclude directories containing
|
||||
.Pa CACHEDIR.TAG .
|
||||
Excluded cache directories are still displayed, but their contents will not be
|
||||
scanned or counted towards the disk usage statistics.
|
||||
.Lk https://bford.info/cachedir/
|
||||
.It Fl L , \-follow\-symlinks , \-no\-follow\-symlinks
|
||||
Follow (or not) symlinks and count the size of the file they point to.
|
||||
This option does not follow symlinks to directories and will cause each
|
||||
symlinked file to count as a unique file.
|
||||
This is different from how hard links are handled.
|
||||
The exact counting behavior of this flag is subject to change in the future.
|
||||
.It Fl \-include\-kernfs , \-exclude\-kernfs
|
||||
(Linux only) Include (default) or exclude Linux pseudo filesystems such as
|
||||
.Pa /proc
|
||||
(procfs) and
|
||||
.Pa /sys
|
||||
(sysfs).
|
||||
.Pp
|
||||
The complete list of currently known pseudo filesystems is: binfmt, bpf, cgroup,
|
||||
cgroup2, debug, devpts, proc, pstore, security, selinux, sys, trace.
|
||||
.It Fl t , \-threads Ar num
|
||||
Number of threads to use when scanning the filesystem, defaults to 1.
|
||||
.Pp
|
||||
In single-threaded mode, the JSON export (see
|
||||
.Fl o )
|
||||
can operate with very little memory, but in multi-threaded mode the entire
|
||||
directory tree is first constructed in memory and written out after the
|
||||
filesystem scan has completed,
|
||||
This causes a delay in output and requires significantly more memory for large
|
||||
directory trees.
|
||||
The binary format (see
|
||||
.Fl O )
|
||||
does not have this problem and supports efficient exporting with any number of
|
||||
threads.
|
||||
.El
|
||||
.
|
||||
.Ss Export Options
|
||||
These options affect behavior when exporting to file with the
|
||||
.Fl o
|
||||
or
|
||||
.Fl O
|
||||
options.
|
||||
.Bl -tag -width Ds
|
||||
.It Fl c , \-compress , \-no\-compress
|
||||
Enable or disable Zstandard compression when exporting to JSON (see
|
||||
.Fl o ) .
|
||||
.It Fl \-compress\-level Ar num
|
||||
Set the Zstandard compression level when using
|
||||
.Fl O
|
||||
or
|
||||
.Fl c .
|
||||
Valid values are 1 (fastest) to 19 (slowest).
|
||||
Defaults to 4.
|
||||
.It Fl \-export\-block\-size Ar num
|
||||
Set the block size, in kibibytes, for the binary export format (see
|
||||
.Fl O ) .
|
||||
Larger blocks require more memory but result in better compression efficiency.
|
||||
This option can be combined with a higher
|
||||
.Fl \-compress\-level
|
||||
for even better compression.
|
||||
.Pp
|
||||
Accepted values are between 4 and 16000.
|
||||
The defaults is to start at 64 KiB and then gradually increase the block size
|
||||
for large exports.
|
||||
.El
|
||||
.
|
||||
.Ss Interface Options
|
||||
.Bl -tag -width Ds
|
||||
.It Fl 0
|
||||
Don't give any feedback while scanning a directory or importing a file, except
|
||||
when a fatal error occurs.
|
||||
Ncurses will not be initialized until the scan is complete.
|
||||
When exporting the data with
|
||||
.Fl o ,
|
||||
ncurses will not be initialized at all.
|
||||
This option is the default when exporting to standard output.
|
||||
.It Fl 1
|
||||
Write progress information to the terminal, but don't open a full-screen
|
||||
ncurses interface.
|
||||
This option is the default when exporting to a file.
|
||||
.Pp
|
||||
In some cases, the ncurses browser interface which you'll see after the
|
||||
scan/import is complete may look garbled when using this option.
|
||||
If you're not exporting to a file,
|
||||
.Fl 2
|
||||
is usually a better choice.
|
||||
.It Fl 2
|
||||
Show a full-screen ncurses interface while scanning a directory or importing
|
||||
a file.
|
||||
This is the only interface that provides feedback on any non-fatal errors while
|
||||
scanning.
|
||||
.It Fl q , \-slow\-ui\-updates , \-fast\-ui\-updates
|
||||
Change the UI update interval while scanning or importing.
|
||||
.Nm
|
||||
updates the screen 10 times a second by default (with
|
||||
.Fl \-fast\-ui\-updates
|
||||
), this can be decreased to once every 2 seconds with
|
||||
.Fl q
|
||||
or
|
||||
.Fl \-slow\-ui\-updates .
|
||||
This option can be used to save bandwidth over remote connections.
|
||||
This option has no effect in combination with
|
||||
.Fl 0 .
|
||||
.It Fl \-enable\-shell , \-disable\-shell
|
||||
Enable or disable shell spawning from the file browser.
|
||||
This feature is enabled by default when scanning a live directory and disabled
|
||||
when importing from file.
|
||||
.It Fl \-enable\-delete , \-disable\-delete
|
||||
Enable or disable the built-in file deletion feature.
|
||||
This feature is enabled by default when scanning a live directory and disabled
|
||||
when importing from file.
|
||||
Explicitly disabling the deletion feature can work as a safeguard to prevent
|
||||
accidental data loss.
|
||||
.It Fl \-enable\-refresh , \-disable\-refresh
|
||||
Enable or disable directory refreshing from the file browser.
|
||||
This feature is enabled by default when scanning a live directory and disabled
|
||||
when importing from file.
|
||||
.It Fl r
|
||||
Read-only mode.
|
||||
When given once, this is an alias for
|
||||
.Fl \-disable\-delete ,
|
||||
when given twice it will also add
|
||||
.Fl \-disable\-shell ,
|
||||
thus ensuring that there is no way to modify the file system from within
|
||||
.Nm .
|
||||
.It Fl \-si , \-no\-si
|
||||
List sizes using base 10 prefixes, that is, powers of 1000 (kB, MB, etc), as
|
||||
defined in the International System of Units (SI), instead of the usual base 2
|
||||
prefixes (KiB, MiB, etc).
|
||||
.It Fl \-disk\-usage , \-apparent\-size
|
||||
Select whether to display disk usage (default) or apparent sizes.
|
||||
Can also be toggled in the file browser with the 'a' key.
|
||||
.It Fl \-show\-hidden , \-hide\-hidden
|
||||
Show (default) or hide "hidden" and excluded files.
|
||||
Can also be toggled in the file browser with the 'e' key.
|
||||
.It Fl \-show\-itemcount , \-hide\-itemcount
|
||||
Show or hide (default) the item counts column.
|
||||
Can also be toggled in the file browser with the 'c' key.
|
||||
.It Fl \-show\-mtime , \-hide\-mtime
|
||||
Show or hide (default) the last modification time column.
|
||||
Can also be toggled in the file browser with the 'm' key.
|
||||
This option is ignored when not in extended mode, see
|
||||
.Fl e .
|
||||
.It Fl \-show\-graph , \-hide\-graph
|
||||
Show (default) or hide the relative size bar column.
|
||||
Can also be toggled in the file browser with the 'g' key.
|
||||
.It Fl \-show\-percent , \-hide\-percent
|
||||
Show (default) or hide the relative size percent column.
|
||||
Can also be toggled in the file browser with the 'g' key.
|
||||
.It Fl \-graph\-style Ar hash | half\-block | eighth\-block
|
||||
Change the way that the relative size bar column is drawn.
|
||||
Recognized values are
|
||||
.Ar hash
|
||||
to draw ASCII '#' characters (default and most portable),
|
||||
.Ar half\-block
|
||||
to use half-block drawing characters or
|
||||
.Ar eighth\-block
|
||||
to use eighth-block drawing characters.
|
||||
Eighth-block characters are the most precise but may not render correctly in
|
||||
all terminals.
|
||||
.It Fl \-shared\-column Ar off | shared | unique
|
||||
Set to
|
||||
.Ar off
|
||||
to disable the shared size column for directories,
|
||||
.Ar shared
|
||||
(default) to display shared directory sizes as a separate column or
|
||||
.Ar unique
|
||||
to display unique directory sizes as a separate column.
|
||||
These options can also be cycled through in the file browser with the 'u' key.
|
||||
.It Fl \-sort Ar column
|
||||
Change the default column to sort on.
|
||||
Accepted values are
|
||||
.Ar disk\-usage
|
||||
(the default),
|
||||
.Ar name , apparent\-size , itemcount
|
||||
or
|
||||
.Ar mtime .
|
||||
The latter only makes sense in extended mode, see
|
||||
.Fl e .
|
||||
.Pp
|
||||
The column name can be suffixed with
|
||||
.Li \-asc
|
||||
or
|
||||
.Li \-desc
|
||||
to change the order to ascending or descending, respectively.
|
||||
For example,
|
||||
.Li \-\-sort=name\-desc
|
||||
to sort by name in descending order.
|
||||
.It Fl \-enable\-natsort , \-disable\-natsort
|
||||
Enable (default) or disable natural sort when sorting by file name.
|
||||
.It Fl \-group\-directories\-first , \-no\-group\-directories\-first
|
||||
Sort (or not) directories before files.
|
||||
.It Fl \-confirm\-quit , \-no\-confirm\-quit
|
||||
Require a confirmation before quitting ncdu.
|
||||
Can be helpful when you accidentally press 'q' during or after a very long scan.
|
||||
.It Fl \-confirm\-delete , \-no\-confirm\-delete
|
||||
Require a confirmation before deleting a file or directory.
|
||||
Enabled by default, but can be disabled if you're absolutely sure you won't
|
||||
accidentally press 'd'.
|
||||
.It Fl \-delete\-command Ar command
|
||||
When set to a non-empty string, replace the built-in file deletion feature with
|
||||
a custom shell command.
|
||||
.Pp
|
||||
The absolute path of the item to be deleted is appended to the given command
|
||||
and the result is evaluated in a shell.
|
||||
The command is run from the same directory that ncdu itself was started in.
|
||||
The
|
||||
.Ev NCDU_DELETE_PATH
|
||||
environment variable is set to the absolute path of the item to be deleted and
|
||||
.Ev NCDU_LEVEL
|
||||
is set in the same fashion as when spawning a shell from within ncdu.
|
||||
.Pp
|
||||
After command completion, the in-memory view of the selected item is refreshed
|
||||
and directory sizes are adjusted as necessary.
|
||||
This is not a full refresh of the complete directory tree, so if the item has
|
||||
been renamed or moved to another directory, it's new location is not
|
||||
automatically picked up.
|
||||
.Pp
|
||||
For example, to use
|
||||
.Xr rm 1
|
||||
interactive mode to prompt before each deletion:
|
||||
.Dl ncdu --no-confirm-delete --delete-command \[aq]rm -ri --\[aq]
|
||||
Or to move files to trash:
|
||||
.Dl ncdu --delete-command \[aq]gio trash --\[aq]
|
||||
.It Fl \-color Ar off | dark | dark-bg
|
||||
Set the color scheme.
|
||||
The following schemes are recognized:
|
||||
.Ar off
|
||||
to disable colors,
|
||||
.Ar dark
|
||||
for a color scheme intended for dark backgrounds and
|
||||
.Ar dark\-bg
|
||||
for a variation of the
|
||||
.Ar dark
|
||||
color scheme that also works in terminals with a light background.
|
||||
.Pp
|
||||
The default is
|
||||
.Ar off .
|
||||
.El
|
||||
.
|
||||
.Sh CONFIGURATION
|
||||
.Nm
|
||||
can be configured by placing command-line options in
|
||||
.Pa /etc/ncdu.conf
|
||||
or
|
||||
.Pa $HOME/.config/ncdu/config .
|
||||
If both files exist, the system configuration will be loaded before the user
|
||||
configuration, allowing users to override options set in the system
|
||||
configuration.
|
||||
Options given on the command line will override options set in the
|
||||
configuration files.
|
||||
The files will not be read at all when
|
||||
.Fl \-ignore\-config
|
||||
is given on the command line.
|
||||
.Pp
|
||||
The configuration file format is simply one command line option per line.
|
||||
Lines starting with '#' are ignored.
|
||||
A line can be prefixed with '@' to suppress errors while parsing the option.
|
||||
Example configuration file:
|
||||
.Bd -literal -offset indent
|
||||
# Always enable extended mode
|
||||
\-e
|
||||
|
||||
# Disable file deletion
|
||||
\-\-disable\-delete
|
||||
|
||||
# Exclude .git directories
|
||||
\-\-exclude .git
|
||||
|
||||
# Read excludes from ~/.ncduexcludes, ignore error if the file does not exist
|
||||
@--exclude-from ~/.ncduexcludes
|
||||
.Ed
|
||||
.
|
||||
.Sh KEYS
|
||||
.Bl -tag -width Ds
|
||||
.It ?
|
||||
Open help + keys + about screen
|
||||
.It up , down , j , k
|
||||
Cycle through the items
|
||||
.It right, enter, l
|
||||
Open selected directory
|
||||
.It left, <, h
|
||||
Go to parent directory
|
||||
.It n
|
||||
Order by filename (press again for descending order)
|
||||
.It s
|
||||
Order by filesize (press again for descending order)
|
||||
.It C
|
||||
Order by number of items (press again for descending order)
|
||||
.It a
|
||||
Toggle between showing disk usage and showing apparent size.
|
||||
.It M
|
||||
Order by latest child mtime, or modified time (press again for descending
|
||||
order).
|
||||
Requires the
|
||||
.Fl e
|
||||
flag.
|
||||
.It d
|
||||
Delete the selected file or directory.
|
||||
An error message will be shown when the contents of the directory do not match
|
||||
or do not exist anymore on the filesystem.
|
||||
.It t
|
||||
Toggle dirs before files when sorting.
|
||||
.It g
|
||||
Toggle between showing percentage, graph, both, or none.
|
||||
Percentage is relative to the size of the current directory, graph is relative
|
||||
to the largest item in the current directory.
|
||||
.It u
|
||||
Toggle display of the shared / unique size column for directories that share
|
||||
hard links.
|
||||
This column is only visible if the current listing contains directories with
|
||||
shared hard links.
|
||||
.It c
|
||||
Toggle display of child item counts.
|
||||
.It m
|
||||
Toggle display of latest child mtime, or modified time.
|
||||
Requires the
|
||||
.Fl e
|
||||
flag.
|
||||
.It e
|
||||
Show/hide 'hidden' or 'excluded' files and directories.
|
||||
Be aware that even if you can't see the hidden files and directories, they are
|
||||
still there and they are still included in the directory sizes.
|
||||
If you suspect that the totals shown at the bottom of the screen are not
|
||||
correct, make sure you haven't enabled this option.
|
||||
.It i
|
||||
Show information about the current selected item.
|
||||
.It r
|
||||
Refresh/recalculate the current directory.
|
||||
.It b
|
||||
Spawn shell in current directory.
|
||||
.Pp
|
||||
.Nm
|
||||
determines your preferred shell from the
|
||||
.Ev NCDU_SHELL
|
||||
or
|
||||
.Ev SHELL
|
||||
environment variable (in that order), or calls
|
||||
.Pa /bin/sh
|
||||
if neither is set.
|
||||
This allows you to also configure another command to be run when the 'b' key is
|
||||
pressed.
|
||||
For example, to spawn the
|
||||
.Xr vifm 1
|
||||
file manager instead of a shell, run
|
||||
.Nm
|
||||
as follows:
|
||||
.Dl NCDU_SHELL=vifm ncdu
|
||||
The
|
||||
.Ev NCDU_LEVEL
|
||||
environment variable is set or incremented before spawning the shell, allowing
|
||||
you to detect if your shell is running from within
|
||||
.Nm .
|
||||
This can be useful to avoid nesting multiple instances, although
|
||||
.Nm
|
||||
itself does not (currently) warn about or prevent this situation.
|
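.Pp
For example (an illustrative snippet, not something ncdu sets up for you), a
POSIX shell startup file could print a reminder when running nested:
.Bd -literal -offset indent
if [ -n "$NCDU_LEVEL" ]; then
    echo "note: this shell was spawned from ncdu (level $NCDU_LEVEL)"
fi
.Ed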
||||
.It q
|
||||
Quit
|
||||
.El
|
||||
.
|
||||
.Sh FILE FLAGS
|
||||
Entries in the browser interface may be prefixed by a one\-character flag.
|
||||
These flags have the following meaning:
|
||||
.Bl -tag -width Ds
|
||||
.It !
|
||||
An error occurred while reading this directory.
|
||||
.It \.
|
||||
An error occurred while reading a subdirectory, so the indicated size may not
|
||||
be correct.
|
||||
.It <
|
||||
File or directory is excluded from the statistics by using exclude patterns.
|
||||
.It >
|
||||
Directory is on another filesystem.
|
||||
.It ^
|
||||
Directory is excluded from the statistics due to being a Linux pseudo
|
||||
filesystem.
|
||||
.It @
|
||||
This is neither a file nor a folder (symlink, socket, ...).
|
||||
.It H
|
||||
Same file was already counted (hard link).
|
||||
.It e
|
||||
Empty directory.
|
||||
.El
|
||||
.
|
||||
.Sh EXAMPLES
|
||||
To scan and browse the directory you're currently in, all you need is a simple:
|
||||
.Dl ncdu
|
||||
To scan a full filesystem, for example your root filesystem, you'll want to use
|
||||
.Fl x :
|
||||
.Dl ncdu \-x /
|
||||
.Pp
|
||||
Since scanning a large directory may take a while, you can scan a directory and
|
||||
export the results for later viewing:
|
||||
.Bd -literal -offset indent
|
||||
ncdu \-1xO export.ncdu /
|
||||
# ...some time later:
|
||||
ncdu \-f export.ncdu
|
||||
.Ed
|
||||
To export from a cron job, make sure to replace
|
||||
.Fl 1
|
||||
with
|
||||
.Fl 0
|
||||
to suppress unnecessary progress output.
|
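For instance, an illustrative crontab entry (the output path is chosen
arbitrarily here):
.Bd -literal -offset indent
# scan the root filesystem every night at 03:00
0 3 * * * ncdu -0xO /var/lib/ncdu/root.ncdu /
.Ed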
||||
.Pp
|
||||
You can also export a directory and browse it once scanning is done:
|
||||
.Dl ncdu \-co\- | tee export.json.zst | ./ncdu \-f\-
|
||||
.Pp
|
||||
To scan a system remotely, but browse through the files locally:
|
||||
.Dl ssh user@system ncdu \-co\- / | ./ncdu \-f\-
|
||||
Remote scanning and local viewing have two major advantages when
|
||||
compared to running
|
||||
.Nm
|
||||
directly on the remote system: You can browse through the scanned directory on
|
||||
the local system without any network latency, and
|
||||
.Nm
|
||||
does not keep the entire directory structure in memory when exporting, so this
|
||||
won't consume much memory on the remote system.
|
||||
.
|
||||
.Sh SEE ALSO
|
||||
.Xr du 1 ,
|
||||
.Xr tree 1 .
|
||||
.Pp
|
||||
.Nm
|
||||
has a website:
|
||||
.Lk https://dev.yorhel.nl/ncdu
|
||||
.
|
||||
.Sh AUTHORS
|
||||
Written by
|
||||
.An Yorhel Aq Mt projects@yorhel.nl
|
||||
.
|
||||
.Sh BUGS
|
||||
Directory hard links and firmlinks (macOS) are not supported.
|
||||
They are not detected as being hard links and will thus get scanned and counted
|
||||
multiple times.
|
||||
.Pp
|
||||
Some minor glitches may appear when displaying filenames that contain multibyte
|
||||
or multicolumn characters.
|
||||
.Pp
|
||||
The unique and shared directory sizes are calculated based on the assumption
|
||||
that the link count of hard links does not change during a filesystem scan or
|
||||
in between refreshes.
|
||||
If this does happen, for example when a hard link is deleted, then these
|
||||
numbers will be very much incorrect and a full refresh by restarting ncdu is
|
||||
needed to get correct numbers again.
|
||||
.Pp
|
||||
All sizes are internally represented as a signed 64-bit integer.
|
||||
If you have a directory larger than 8 EiB minus one byte, ncdu will clip its
|
||||
size to 8 EiB minus one byte.
|
||||
When deleting or refreshing items in a directory with a clipped size, the
|
||||
resulting sizes will be incorrect.
|
||||
Likewise, item counts are stored in a 32-bit integer, so they will be incorrect in
|
||||
the unlikely event that you happen to have more than 4 billion items in a
|
||||
directory.
|
||||
.Pp
|
||||
Please report any other bugs you may find at the bug tracker, which can be
|
||||
found on the web site at
|
||||
.Lk https://dev.yorhel.nl/ncdu
|
||||
|
|
@@ -1,5 +0,0 @@
|
|||
bin_PROGRAMS = ncdu
|
||||
|
||||
ncdu_SOURCES = browser.c calc.c main.c settings.c util.c exclude.c help.c delete.c
|
||||
|
||||
noinst_HEADERS = ncdu.h
|
||||
src/bin_export.zig (new file, 468 lines)
@@ -0,0 +1,468 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const model = @import("model.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const util = @import("util.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
pub const global = struct {
|
||||
var fd: std.fs.File = undefined;
|
||||
var index: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var file_off: u64 = 0;
|
||||
var lock: std.Thread.Mutex = .{};
|
||||
var root_itemref: u64 = 0;
|
||||
};
|
||||
|
||||
pub const SIGNATURE = "\xbfncduEX1";
|
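
// Informal sketch of the resulting file layout, derived from setupOutput(),
// Thread.flush() and done() below (not an authoritative format spec):
//
//   8-byte SIGNATURE
//   zero or more data blocks, each:
//     4-byte blockHeader(0, total block length)
//     4-byte big-endian block number
//     zstd-compressed item data
//     4-byte blockHeader(0, total block length)   (footer, same as header)
//   one index block at the end:
//     4-byte blockHeader(1, total index length)
//     8 bytes per data block: (file offset << 24) + block length
//     8-byte big-endian itemref of the root directory
//     4-byte blockHeader(1, total index length)   (footer)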
||||
|
||||
pub const ItemKey = enum(u5) {
|
||||
// all items
|
||||
type = 0, // EType
|
||||
name = 1, // bytes
|
||||
prev = 2, // itemref
|
||||
// Only for non-specials
|
||||
asize = 3, // u64
|
||||
dsize = 4, // u64
|
||||
// Only for .dir
|
||||
dev = 5, // u64 only if different from parent dir
|
||||
rderr = 6, // bool true = error reading directory list, false = error in sub-item, absent = no error
|
||||
cumasize = 7, // u64
|
||||
cumdsize = 8, // u64
|
||||
shrasize = 9, // u64
|
||||
shrdsize = 10, // u64
|
||||
items = 11, // u64
|
||||
sub = 12, // itemref only if dir is not empty
|
||||
// Only for .link
|
||||
ino = 13, // u64
|
||||
nlink = 14, // u32
|
||||
// Extended mode
|
||||
uid = 15, // u32
|
||||
gid = 16, // u32
|
||||
mode = 17, // u16
|
||||
mtime = 18, // u64
|
||||
_,
|
||||
};
|
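
// Informal example (not a normative encoding): the first regular file "foo"
// in a directory, with apparent size 512 and disk usage 4096, is written by
// Thread.itemStart() / Dir.addStat() below as an indefinite-length CBOR map
// roughly like
//   { .type: <EType of a regular file>, .name: "foo", .asize: 512, .dsize: 4096 }
// terminated by a CBOR break byte; a .prev reference is added for every item
// after the first one in a directory.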
||||
|
||||
// Pessimistic upper bound on the encoded size of an item, excluding the name field.
|
||||
// 2 bytes for map start/end, 11 per field (2 for the key, 9 for a full u64).
|
||||
const MAX_ITEM_LEN = 2 + 11 * @typeInfo(ItemKey).@"enum".fields.len;
|
||||
|
||||
pub const CborMajor = enum(u3) { pos, neg, bytes, text, array, map, tag, simple };
|
||||
|
||||
inline fn bigu16(v: u16) [2]u8 { return @bitCast(std.mem.nativeToBig(u16, v)); }
|
||||
inline fn bigu32(v: u32) [4]u8 { return @bitCast(std.mem.nativeToBig(u32, v)); }
|
||||
inline fn bigu64(v: u64) [8]u8 { return @bitCast(std.mem.nativeToBig(u64, v)); }
|
||||
|
||||
inline fn blockHeader(id: u4, len: u28) [4]u8 { return bigu32((@as(u32, id) << 28) | len); }
|
||||
|
||||
inline fn cborByte(major: CborMajor, arg: u5) u8 { return (@as(u8, @intFromEnum(major)) << 5) | arg; }
|
||||
|
||||
|
||||
// (Uncompressed) data block size.
|
||||
// Start with 64k, then use increasingly larger block sizes as the export file
|
||||
// grows. This is both to stay within the block number limit of the index block
|
||||
// and because, with a larger index block, the reader will end up using more
|
||||
// memory anyway.
|
||||
fn blockSize(num: u32) usize {
|
||||
// columns below: block number range (millions), block size (KiB),
|
||||
// and uncompressed data covered by that range (GiB)
|
||||
return main.config.export_block_size
|
||||
orelse if (num < ( 1<<20)) 64<<10 // 64
|
||||
else if (num < ( 2<<20)) 128<<10 // 128
|
||||
else if (num < ( 4<<20)) 256<<10 // 512
|
||||
else if (num < ( 8<<20)) 512<<10 // 2048
|
||||
else if (num < (16<<20)) 1024<<10 // 8192
|
||||
else 2048<<10; // 32768
|
||||
}
|
||||
|
||||
// Upper bound on the return value of blockSize()
|
||||
// (config.export_block_size may be larger than the sizes listed above, let's
|
||||
// stick with the maximum block size supported by the file format to be safe)
|
||||
const MAX_BLOCK_SIZE: usize = 1<<28;
|
||||
|
||||
|
||||
pub const Thread = struct {
|
||||
buf: []u8 = undefined,
|
||||
off: usize = MAX_BLOCK_SIZE, // pretend we have a full block to trigger a flush() for the first write
|
||||
block_num: u32 = std.math.maxInt(u32),
|
||||
itemref: u64 = 0, // ref of item currently being written
|
||||
|
||||
// unused, but kept around for easy debugging
|
||||
fn compressNone(in: []const u8, out: []u8) usize {
|
||||
@memcpy(out[0..in.len], in);
|
||||
return in.len;
|
||||
}
|
||||
|
||||
fn compressZstd(in: []const u8, out: []u8) usize {
|
||||
while (true) {
|
||||
const r = c.ZSTD_compress(out.ptr, out.len, in.ptr, in.len, main.config.complevel);
|
||||
if (c.ZSTD_isError(r) == 0) return r;
|
||||
ui.oom(); // That *ought* to be the only reason the above call can fail.
|
||||
}
|
||||
}
|
||||
|
||||
fn createBlock(t: *Thread) std.ArrayListUnmanaged(u8) {
|
||||
var out: std.ArrayListUnmanaged(u8) = .empty;
|
||||
if (t.block_num == std.math.maxInt(u32) or t.off == 0) return out;
|
||||
|
||||
out.ensureTotalCapacityPrecise(main.allocator, 12 + @as(usize, @intCast(c.ZSTD_COMPRESSBOUND(@as(c_int, @intCast(t.off)))))) catch unreachable;
|
||||
out.items.len = out.capacity;
|
||||
const bodylen = compressZstd(t.buf[0..t.off], out.items[8..]);
|
||||
out.items.len = 12 + bodylen;
|
||||
|
||||
out.items[0..4].* = blockHeader(0, @intCast(out.items.len));
|
||||
out.items[4..8].* = bigu32(t.block_num);
|
||||
out.items[8+bodylen..][0..4].* = blockHeader(0, @intCast(out.items.len));
|
||||
return out;
|
||||
}
|
||||
|
||||
fn flush(t: *Thread, expected_len: usize) void {
|
||||
@branchHint(.unlikely);
|
||||
var block = createBlock(t);
|
||||
defer block.deinit(main.allocator);
|
||||
|
||||
global.lock.lock();
|
||||
defer global.lock.unlock();
|
||||
// This can only really happen when the root path exceeds our block size,
|
||||
// in which case we would probably have error'ed out earlier anyway.
|
||||
if (expected_len > t.buf.len) ui.die("Error writing data: path too long.\n", .{});
|
||||
|
||||
if (block.items.len > 0) {
|
||||
if (global.file_off >= (1<<40)) ui.die("Export data file has grown too large, please report a bug.\n", .{});
|
||||
global.index.items[4..][t.block_num*8..][0..8].* = bigu64((global.file_off << 24) + block.items.len);
|
||||
global.file_off += block.items.len;
|
||||
global.fd.writeAll(block.items) catch |e|
|
||||
ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
|
||||
}
|
||||
|
||||
t.off = 0;
|
||||
t.block_num = @intCast((global.index.items.len - 4) / 8);
|
||||
global.index.appendSlice(main.allocator, &[1]u8{0}**8) catch unreachable;
|
||||
if (global.index.items.len + 12 >= (1<<28)) ui.die("Too many data blocks, please report a bug.\n", .{});
|
||||
|
||||
const newsize = blockSize(t.block_num);
|
||||
if (t.buf.len != newsize) t.buf = main.allocator.realloc(t.buf, newsize) catch unreachable;
|
||||
}
|
||||
|
||||
fn cborHead(t: *Thread, major: CborMajor, arg: u64) void {
|
||||
if (arg <= 23) {
|
||||
t.buf[t.off] = cborByte(major, @intCast(arg));
|
||||
t.off += 1;
|
||||
} else if (arg <= std.math.maxInt(u8)) {
|
||||
t.buf[t.off] = cborByte(major, 24);
|
||||
t.buf[t.off+1] = @truncate(arg);
|
||||
t.off += 2;
|
||||
} else if (arg <= std.math.maxInt(u16)) {
|
||||
t.buf[t.off] = cborByte(major, 25);
|
||||
t.buf[t.off+1..][0..2].* = bigu16(@intCast(arg));
|
||||
t.off += 3;
|
||||
} else if (arg <= std.math.maxInt(u32)) {
|
||||
t.buf[t.off] = cborByte(major, 26);
|
||||
t.buf[t.off+1..][0..4].* = bigu32(@intCast(arg));
|
||||
t.off += 5;
|
||||
} else {
|
||||
t.buf[t.off] = cborByte(major, 27);
|
||||
t.buf[t.off+1..][0..8].* = bigu64(arg);
|
||||
t.off += 9;
|
||||
}
|
||||
}
|
||||
|
||||
fn cborIndef(t: *Thread, major: CborMajor) void {
|
||||
t.buf[t.off] = cborByte(major, 31);
|
||||
t.off += 1;
|
||||
}
|
||||
|
||||
fn itemKey(t: *Thread, key: ItemKey) void {
|
||||
t.cborHead(.pos, @intFromEnum(key));
|
||||
}
|
||||
|
||||
fn itemRef(t: *Thread, key: ItemKey, ref: ?u64) void {
|
||||
const r = ref orelse return;
|
||||
t.itemKey(key);
|
||||
// Full references compress like shit and most of the references point
|
||||
// into the same block, so optimize that case by using a negative
|
||||
// offset instead.
|
||||
if ((r >> 24) == t.block_num) t.cborHead(.neg, t.itemref - r - 1)
|
||||
else t.cborHead(.pos, r);
|
||||
}
|
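
// Worked illustration of the itemref encoding above (numbers are made up):
// an itemref packs the block number into the upper bits and the byte offset
// within that block into the low 24 bits, so block 2 / offset 0x123 becomes
// (2 << 24) | 0x123. If the item currently being written sits at offset 0x200
// of the same block, its reference is emitted as the negative CBOR value
// 0x200 - 0x123 - 1 = 0xDC rather than the full 64-bit reference.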
||||
|
||||
// Reserve space for a new item, write out the type, prev and name fields and return the itemref.
|
||||
fn itemStart(t: *Thread, itype: model.EType, prev_item: ?u64, name: []const u8) u64 {
|
||||
const min_len = name.len + MAX_ITEM_LEN;
|
||||
if (t.off + min_len > t.buf.len) t.flush(min_len);
|
||||
|
||||
t.itemref = (@as(u64, t.block_num) << 24) | t.off;
|
||||
t.cborIndef(.map);
|
||||
t.itemKey(.type);
|
||||
if (@intFromEnum(itype) >= 0) t.cborHead(.pos, @intCast(@intFromEnum(itype)))
|
||||
else t.cborHead(.neg, @intCast(-1 - @intFromEnum(itype)));
|
||||
t.itemKey(.name);
|
||||
t.cborHead(.bytes, name.len);
|
||||
@memcpy(t.buf[t.off..][0..name.len], name);
|
||||
t.off += name.len;
|
||||
t.itemRef(.prev, prev_item);
|
||||
return t.itemref;
|
||||
}
|
||||
|
||||
fn itemExt(t: *Thread, stat: *const sink.Stat) void {
|
||||
if (!main.config.extended) return;
|
||||
if (stat.ext.pack.hasuid) {
|
||||
t.itemKey(.uid);
|
||||
t.cborHead(.pos, stat.ext.uid);
|
||||
}
|
||||
if (stat.ext.pack.hasgid) {
|
||||
t.itemKey(.gid);
|
||||
t.cborHead(.pos, stat.ext.gid);
|
||||
}
|
||||
if (stat.ext.pack.hasmode) {
|
||||
t.itemKey(.mode);
|
||||
t.cborHead(.pos, stat.ext.mode);
|
||||
}
|
||||
if (stat.ext.pack.hasmtime) {
|
||||
t.itemKey(.mtime);
|
||||
t.cborHead(.pos, stat.ext.mtime);
|
||||
}
|
||||
}
|
||||
|
||||
fn itemEnd(t: *Thread) void {
|
||||
t.cborIndef(.simple);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
pub const Dir = struct {
|
||||
// TODO: When items are written out into blocks depth-first, parent dirs
|
||||
// will end up getting their items distributed over many blocks, which will
|
||||
// significantly slow down reading that dir's listing. It may be worth
|
||||
// buffering some items at the Dir level before flushing them out to the
|
||||
// Thread buffer.
|
||||
|
||||
// The lock protects all of the below, and is necessary because final()
|
||||
// accesses the parent dir and may be called from other threads.
|
||||
// I'm not expecting much lock contention, but it's possible to turn
|
||||
// last_item into an atomic integer and other fields could be split up for
|
||||
// subdir use.
|
||||
lock: std.Thread.Mutex = .{},
|
||||
last_sub: ?u64 = null,
|
||||
stat: sink.Stat,
|
||||
items: u64 = 0,
|
||||
size: u64 = 0,
|
||||
blocks: u64 = 0,
|
||||
err: bool = false,
|
||||
suberr: bool = false,
|
||||
shared_size: u64 = 0,
|
||||
shared_blocks: u64 = 0,
|
||||
inodes: Inodes = Inodes.init(main.allocator),
|
||||
|
||||
const Inodes = std.AutoHashMap(u64, Inode);
|
||||
const Inode = struct {
|
||||
size: u64,
|
||||
blocks: u64,
|
||||
nlink: u32,
|
||||
nfound: u32,
|
||||
};
|
||||
|
||||
|
||||
pub fn addSpecial(d: *Dir, t: *Thread, name: []const u8, sp: model.EType) void {
|
||||
d.lock.lock();
|
||||
defer d.lock.unlock();
|
||||
d.items += 1;
|
||||
if (sp == .err) d.suberr = true;
|
||||
d.last_sub = t.itemStart(sp, d.last_sub, name);
|
||||
t.itemEnd();
|
||||
}
|
||||
|
||||
pub fn addStat(d: *Dir, t: *Thread, name: []const u8, stat: *const sink.Stat) void {
|
||||
d.lock.lock();
|
||||
defer d.lock.unlock();
|
||||
d.items += 1;
|
||||
if (stat.etype != .link) {
|
||||
d.size +|= stat.size;
|
||||
d.blocks +|= stat.blocks;
|
||||
}
|
||||
d.last_sub = t.itemStart(stat.etype, d.last_sub, name);
|
||||
t.itemKey(.asize);
|
||||
t.cborHead(.pos, stat.size);
|
||||
t.itemKey(.dsize);
|
||||
t.cborHead(.pos, util.blocksToSize(stat.blocks));
|
||||
|
||||
if (stat.etype == .link) {
|
||||
const lnk = d.inodes.getOrPut(stat.ino) catch unreachable;
|
||||
if (!lnk.found_existing) lnk.value_ptr.* = .{
|
||||
.size = stat.size,
|
||||
.blocks = stat.blocks,
|
||||
.nlink = stat.nlink,
|
||||
.nfound = 1,
|
||||
} else lnk.value_ptr.nfound += 1;
|
||||
t.itemKey(.ino);
|
||||
t.cborHead(.pos, stat.ino);
|
||||
t.itemKey(.nlink);
|
||||
t.cborHead(.pos, stat.nlink);
|
||||
}
|
||||
|
||||
t.itemExt(stat);
|
||||
t.itemEnd();
|
||||
}
|
||||
|
||||
pub fn addDir(d: *Dir, stat: *const sink.Stat) Dir {
|
||||
d.lock.lock();
|
||||
defer d.lock.unlock();
|
||||
d.items += 1;
|
||||
d.size +|= stat.size;
|
||||
d.blocks +|= stat.blocks;
|
||||
return .{ .stat = stat.* };
|
||||
}
|
||||
|
||||
pub fn setReadError(d: *Dir) void {
|
||||
d.lock.lock();
|
||||
defer d.lock.unlock();
|
||||
d.err = true;
|
||||
}
|
||||
|
||||
// XXX: older JSON exports did not include the nlink count and have
|
||||
// this field set to '0'. We can deal with that when importing to
|
||||
// mem_sink, but the hardlink counting algorithm used here really does need
|
||||
// that information. Current code makes sure to count such links only once
|
||||
// per dir, but does not count them towards the shared_* fields. That
|
||||
// behavior is similar to ncdu 1.x, but the difference between memory
|
||||
// import and this file export might be surprising.
|
||||
fn countLinks(d: *Dir, parent: ?*Dir) void {
|
||||
var parent_new: u32 = 0;
|
||||
var it = d.inodes.iterator();
|
||||
while (it.next()) |kv| {
|
||||
const v = kv.value_ptr;
|
||||
d.size +|= v.size;
|
||||
d.blocks +|= v.blocks;
|
||||
if (v.nlink > 1 and v.nfound < v.nlink) {
|
||||
d.shared_size +|= v.size;
|
||||
d.shared_blocks +|= v.blocks;
|
||||
}
|
||||
|
||||
const p = parent orelse continue;
|
||||
// All contained in this dir, no need to keep this entry around
|
||||
if (v.nlink > 0 and v.nfound >= v.nlink) {
|
||||
p.size +|= v.size;
|
||||
p.blocks +|= v.blocks;
|
||||
_ = d.inodes.remove(kv.key_ptr.*);
|
||||
} else if (!p.inodes.contains(kv.key_ptr.*))
|
||||
parent_new += 1;
|
||||
}
|
||||
|
||||
// Merge remaining inodes into parent
|
||||
const p = parent orelse return;
|
||||
if (d.inodes.count() == 0) return;
|
||||
|
||||
// If parent is empty, just transfer
|
||||
if (p.inodes.count() == 0) {
|
||||
p.inodes.deinit();
|
||||
p.inodes = d.inodes;
|
||||
d.inodes = Inodes.init(main.allocator); // So we can deinit() without affecting parent
|
||||
// Otherwise, merge
|
||||
} else {
|
||||
p.inodes.ensureUnusedCapacity(parent_new) catch unreachable;
|
||||
it = d.inodes.iterator();
|
||||
while (it.next()) |kv| {
|
||||
const v = kv.value_ptr;
|
||||
const plnk = p.inodes.getOrPutAssumeCapacity(kv.key_ptr.*);
|
||||
if (!plnk.found_existing) plnk.value_ptr.* = v.*
|
||||
else plnk.value_ptr.*.nfound += v.nfound;
|
||||
}
|
||||
}
|
||||
}
|
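
// Illustration of the accounting above (hypothetical numbers): an inode with
// nlink=3 of which two links were found in this directory (nfound=2) adds its
// size to both 'size' and 'shared_size' here and is merged into the parent's
// inode set for further counting; an inode whose three links were all found
// in this directory adds to 'size' only, is credited to the parent's totals
// directly, and is dropped from the set.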
||||
|
||||
pub fn final(d: *Dir, t: *Thread, name: []const u8, parent: ?*Dir) void {
|
||||
if (parent) |p| p.lock.lock();
|
||||
defer if (parent) |p| p.lock.unlock();
|
||||
|
||||
if (parent) |p| {
|
||||
// Different dev? Don't merge the 'inodes' sets, just count the
|
||||
// links here first so the sizes get added to the parent.
|
||||
if (p.stat.dev != d.stat.dev) d.countLinks(null);
|
||||
|
||||
p.items += d.items;
|
||||
p.size +|= d.size;
|
||||
p.blocks +|= d.blocks;
|
||||
if (d.suberr or d.err) p.suberr = true;
|
||||
|
||||
// Same dir, merge inodes
|
||||
if (p.stat.dev == d.stat.dev) d.countLinks(p);
|
||||
|
||||
p.last_sub = t.itemStart(.dir, p.last_sub, name);
|
||||
} else {
|
||||
d.countLinks(null);
|
||||
global.root_itemref = t.itemStart(.dir, null, name);
|
||||
}
|
||||
d.inodes.deinit();
|
||||
|
||||
t.itemKey(.asize);
|
||||
t.cborHead(.pos, d.stat.size);
|
||||
t.itemKey(.dsize);
|
||||
t.cborHead(.pos, util.blocksToSize(d.stat.blocks));
|
||||
if (parent == null or parent.?.stat.dev != d.stat.dev) {
|
||||
t.itemKey(.dev);
|
||||
t.cborHead(.pos, d.stat.dev);
|
||||
}
|
||||
if (d.err or d.suberr) {
|
||||
t.itemKey(.rderr);
|
||||
t.cborHead(.simple, if (d.err) 21 else 20);
|
||||
}
|
||||
t.itemKey(.cumasize);
|
||||
t.cborHead(.pos, d.size +| d.stat.size);
|
||||
t.itemKey(.cumdsize);
|
||||
t.cborHead(.pos, util.blocksToSize(d.blocks +| d.stat.blocks));
|
||||
if (d.shared_size > 0) {
|
||||
t.itemKey(.shrasize);
|
||||
t.cborHead(.pos, d.shared_size);
|
||||
}
|
||||
if (d.shared_blocks > 0) {
|
||||
t.itemKey(.shrdsize);
|
||||
t.cborHead(.pos, util.blocksToSize(d.shared_blocks));
|
||||
}
|
||||
t.itemKey(.items);
|
||||
t.cborHead(.pos, d.items);
|
||||
t.itemRef(.sub, d.last_sub);
|
||||
t.itemExt(&d.stat);
|
||||
t.itemEnd();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
pub fn createRoot(stat: *const sink.Stat, threads: []sink.Thread) Dir {
|
||||
for (threads) |*t| {
|
||||
t.sink.bin.buf = main.allocator.alloc(u8, blockSize(0)) catch unreachable;
|
||||
}
|
||||
|
||||
return .{ .stat = stat.* };
|
||||
}
|
||||
|
||||
pub fn done(threads: []sink.Thread) void {
|
||||
for (threads) |*t| {
|
||||
t.sink.bin.flush(0);
|
||||
main.allocator.free(t.sink.bin.buf);
|
||||
}
|
||||
|
||||
while (std.mem.endsWith(u8, global.index.items, &[1]u8{0}**8))
|
||||
global.index.shrinkRetainingCapacity(global.index.items.len - 8);
|
||||
global.index.appendSlice(main.allocator, &bigu64(global.root_itemref)) catch unreachable;
|
||||
global.index.appendSlice(main.allocator, &blockHeader(1, @intCast(global.index.items.len + 4))) catch unreachable;
|
||||
global.index.items[0..4].* = blockHeader(1, @intCast(global.index.items.len));
|
||||
global.fd.writeAll(global.index.items) catch |e|
|
||||
ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
|
||||
global.index.clearAndFree(main.allocator);
|
||||
|
||||
global.fd.close();
|
||||
}
|
||||
|
||||
pub fn setupOutput(fd: std.fs.File) void {
|
||||
global.fd = fd;
|
||||
fd.writeAll(SIGNATURE) catch |e|
|
||||
ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
|
||||
global.file_off = 8;
|
||||
|
||||
// Placeholder for the index block header.
|
||||
global.index.appendSlice(main.allocator, "aaaa") catch unreachable;
|
||||
}
|
||||
src/bin_reader.zig (new file, 521 lines)
@@ -0,0 +1,521 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const model = @import("model.zig");
|
||||
const util = @import("util.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const bin_export = @import("bin_export.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
|
||||
const CborMajor = bin_export.CborMajor;
|
||||
const ItemKey = bin_export.ItemKey;
|
||||
|
||||
// Two ways to read a bin export:
|
||||
//
|
||||
// 1. Streaming import
|
||||
// - Read blocks sequentially, assemble items into model.Entry's and stitch
|
||||
// them together on the go.
|
||||
// - Does not use the sink.zig API, since sub-level items are read before their parent dirs.
|
||||
// - Useful when:
|
||||
// - User attempts to do a refresh or delete while browsing a file through (2)
|
||||
// - Reading from a stream
|
||||
//
|
||||
// 2. Random access browsing
|
||||
// - Read final block first to get the root item, then have browser.zig fetch
|
||||
// dir listings from this file.
|
||||
// - The default reader mode, requires much less memory than (1) and provides
|
||||
// a snappier first-browsing experience.
|
||||
//
|
||||
// The approach from (2) can also be used to walk through the entire directory
|
||||
// tree and stream it to sink.zig (either for importing or converting to JSON).
|
||||
// That would allow for better code reuse and low-memory conversion, but
|
||||
// performance will not be as good as a direct streaming read. Needs
|
||||
// benchmarks.
|
||||
//
|
||||
// This file only implements (2) at the moment.
|
||||
|
||||
pub const global = struct {
|
||||
var fd: std.fs.File = undefined;
|
||||
var index: []u8 = undefined;
|
||||
var blocks: [8]Block = [1]Block{.{}}**8;
|
||||
var counter: u64 = 0;
|
||||
|
||||
// Last itemref being read/parsed. This is a hack to provide *some* context on error.
|
||||
// Providing more context mainly just bloats the binary and decreases
|
||||
// performance for fairly little benefit. Nobody's going to debug a corrupted export.
|
||||
var lastitem: ?u64 = null;
|
||||
};
|
||||
|
||||
|
||||
const Block = struct {
|
||||
num: u32 = std.math.maxInt(u32),
|
||||
last: u64 = 0,
|
||||
data: []u8 = undefined,
|
||||
};
|
||||
|
||||
|
||||
inline fn bigu16(v: [2]u8) u16 { return std.mem.bigToNative(u16, @bitCast(v)); }
|
||||
inline fn bigu32(v: [4]u8) u32 { return std.mem.bigToNative(u32, @bitCast(v)); }
|
||||
inline fn bigu64(v: [8]u8) u64 { return std.mem.bigToNative(u64, @bitCast(v)); }
|
||||
|
||||
fn die() noreturn {
|
||||
@branchHint(.cold);
|
||||
if (global.lastitem) |e| ui.die("Error reading item {x} from file\n", .{e})
|
||||
else ui.die("Error reading from file\n", .{});
|
||||
}
|
||||
|
||||
|
||||
fn readBlock(num: u32) []const u8 {
|
||||
// Simple linear search, only suitable if we keep the number of in-memory blocks small.
|
||||
var block: *Block = &global.blocks[0];
|
||||
for (&global.blocks) |*b| {
|
||||
if (b.num == num) {
|
||||
if (b.last != global.counter) {
|
||||
global.counter += 1;
|
||||
b.last = global.counter;
|
||||
}
|
||||
return b.data;
|
||||
}
|
||||
if (block.last > b.last) block = b;
|
||||
}
|
||||
if (block.num != std.math.maxInt(u32))
|
||||
main.allocator.free(block.data);
|
||||
block.num = num;
|
||||
global.counter += 1;
|
||||
block.last = global.counter;
|
||||
|
||||
if (num > global.index.len/8 - 1) die();
|
||||
const offlen = bigu64(global.index[num*8..][0..8].*);
|
||||
const off = offlen >> 24;
|
||||
const len = offlen & 0xffffff;
|
||||
if (len <= 12) die();
|
||||
|
||||
// Only read the compressed data part, assume block header, number and footer are correct.
|
||||
const buf = main.allocator.alloc(u8, @intCast(len - 12)) catch unreachable;
|
||||
defer main.allocator.free(buf);
|
||||
const rdlen = global.fd.preadAll(buf, off + 8)
|
||||
catch |e| ui.die("Error reading from file: {s}\n", .{ui.errorString(e)});
|
||||
if (rdlen != buf.len) die();
|
||||
|
||||
const rawlen = c.ZSTD_getFrameContentSize(buf.ptr, buf.len);
|
||||
if (rawlen <= 0 or rawlen >= (1<<24)) die();
|
||||
block.data = main.allocator.alloc(u8, @intCast(rawlen)) catch unreachable;
|
||||
|
||||
const res = c.ZSTD_decompress(block.data.ptr, block.data.len, buf.ptr, buf.len);
|
||||
if (res != block.data.len) ui.die("Error decompressing block {} (expected {} got {})\n", .{ num, block.data.len, res });
|
||||
|
||||
return block.data;
|
||||
}
|
||||
|
||||
|
||||
const CborReader = struct {
|
||||
buf: []const u8,
|
||||
|
||||
fn head(r: *CborReader) CborVal {
|
||||
if (r.buf.len < 1) die();
|
||||
var v = CborVal{
|
||||
.rd = r,
|
||||
.major = @enumFromInt(r.buf[0] >> 5),
|
||||
.indef = false,
|
||||
.arg = 0,
|
||||
};
|
||||
switch (r.buf[0] & 0x1f) {
|
||||
0x00...0x17 => |n| {
|
||||
v.arg = n;
|
||||
r.buf = r.buf[1..];
|
||||
},
|
||||
0x18 => {
|
||||
if (r.buf.len < 2) die();
|
||||
v.arg = r.buf[1];
|
||||
r.buf = r.buf[2..];
|
||||
},
|
||||
0x19 => {
|
||||
if (r.buf.len < 3) die();
|
||||
v.arg = bigu16(r.buf[1..3].*);
|
||||
r.buf = r.buf[3..];
|
||||
},
|
||||
0x1a => {
|
||||
if (r.buf.len < 5) die();
|
||||
v.arg = bigu32(r.buf[1..5].*);
|
||||
r.buf = r.buf[5..];
|
||||
},
|
||||
0x1b => {
|
||||
if (r.buf.len < 9) die();
|
||||
v.arg = bigu64(r.buf[1..9].*);
|
||||
r.buf = r.buf[9..];
|
||||
},
|
||||
0x1f => switch (v.major) {
|
||||
.bytes, .text, .array, .map, .simple => {
|
||||
v.indef = true;
|
||||
r.buf = r.buf[1..];
|
||||
},
|
||||
else => die(),
|
||||
},
|
||||
else => die(),
|
||||
}
|
||||
return v;
|
||||
}
|
||||
|
||||
// Read the next CBOR value, skipping any tags
|
||||
fn next(r: *CborReader) CborVal {
|
||||
while (true) {
|
||||
const v = r.head();
|
||||
if (v.major != .tag) return v;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const CborVal = struct {
|
||||
rd: *CborReader,
|
||||
major: CborMajor,
|
||||
indef: bool,
|
||||
arg: u64,
|
||||
|
||||
fn end(v: *const CborVal) bool {
|
||||
return v.major == .simple and v.indef;
|
||||
}
|
||||
|
||||
fn int(v: *const CborVal, T: type) T {
|
||||
switch (v.major) {
|
||||
.pos => return std.math.cast(T, v.arg) orelse die(),
|
||||
.neg => {
|
||||
if (std.math.minInt(T) == 0) die();
|
||||
if (v.arg > std.math.maxInt(T)) die();
|
||||
return -@as(T, @intCast(v.arg)) + (-1);
|
||||
},
|
||||
else => die(),
|
||||
}
|
||||
}
|
||||
|
||||
fn isTrue(v: *const CborVal) bool {
|
||||
return v.major == .simple and v.arg == 21;
|
||||
}
|
||||
|
||||
// Read either a byte or text string.
|
||||
// Doesn't validate UTF-8 strings, doesn't support indefinite-length strings.
|
||||
fn bytes(v: *const CborVal) []const u8 {
|
||||
if (v.indef or (v.major != .bytes and v.major != .text)) die();
|
||||
if (v.rd.buf.len < v.arg) die();
|
||||
defer v.rd.buf = v.rd.buf[@intCast(v.arg)..];
|
||||
return v.rd.buf[0..@intCast(v.arg)];
|
||||
}
|
||||
|
||||
// Skip current value.
|
||||
fn skip(v: *const CborVal) void {
|
||||
// indefinite-length bytes, text, array or map; skip till break marker.
|
||||
if (v.major != .simple and v.indef) {
|
||||
while (true) {
|
||||
const n = v.rd.next();
|
||||
if (n.end()) return;
|
||||
n.skip();
|
||||
}
|
||||
}
|
||||
switch (v.major) {
|
||||
.bytes, .text => {
|
||||
if (v.rd.buf.len < v.arg) die();
|
||||
v.rd.buf = v.rd.buf[@intCast(v.arg)..];
|
||||
},
|
||||
.array => {
|
||||
if (v.arg > (1<<24)) die();
|
||||
for (0..@intCast(v.arg)) |_| v.rd.next().skip();
|
||||
},
|
||||
.map => {
|
||||
if (v.arg > (1<<24)) die();
|
||||
for (0..@intCast(v.arg*|2)) |_| v.rd.next().skip();
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
|
||||
fn etype(v: *const CborVal) model.EType {
|
||||
const n = v.int(i32);
|
||||
return std.meta.intToEnum(model.EType, n)
|
||||
catch if (n < 0) .pattern else .nonreg;
|
||||
}
|
||||
|
||||
fn itemref(v: *const CborVal, cur: u64) u64 {
|
||||
if (v.major == .pos) return v.arg;
|
||||
if (v.major == .neg) {
|
||||
if (v.arg >= (cur & 0xffffff)) die();
|
||||
return cur - v.arg - 1;
|
||||
}
|
||||
return die();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
test "CBOR int parsing" {
|
||||
inline for (.{
|
||||
.{ .in = "\x00", .t = u1, .exp = 0 },
|
||||
.{ .in = "\x01", .t = u1, .exp = 1 },
|
||||
.{ .in = "\x18\x18", .t = u8, .exp = 0x18 },
|
||||
.{ .in = "\x18\xff", .t = u8, .exp = 0xff },
|
||||
.{ .in = "\x19\x07\xff", .t = u64, .exp = 0x7ff },
|
||||
.{ .in = "\x19\xff\xff", .t = u64, .exp = 0xffff },
|
||||
.{ .in = "\x1a\x00\x01\x00\x00", .t = u64, .exp = 0x10000 },
|
||||
.{ .in = "\x1b\x7f\xff\xff\xff\xff\xff\xff\xff", .t = i64, .exp = std.math.maxInt(i64) },
|
||||
.{ .in = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff", .t = u64, .exp = std.math.maxInt(u64) },
|
||||
.{ .in = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff", .t = i65, .exp = std.math.maxInt(u64) },
|
||||
.{ .in = "\x20", .t = i1, .exp = -1 },
|
||||
.{ .in = "\x38\x18", .t = i8, .exp = -0x19 },
|
||||
.{ .in = "\x39\x01\xf3", .t = i16, .exp = -500 },
|
||||
.{ .in = "\x3a\xfe\xdc\xba\x97", .t = i33, .exp = -0xfedc_ba98 },
|
||||
.{ .in = "\x3b\x7f\xff\xff\xff\xff\xff\xff\xff", .t = i64, .exp = std.math.minInt(i64) },
|
||||
.{ .in = "\x3b\xff\xff\xff\xff\xff\xff\xff\xff", .t = i65, .exp = std.math.minInt(i65) },
|
||||
}) |t| {
|
||||
var r = CborReader{.buf = t.in};
|
||||
try std.testing.expectEqual(@as(t.t, t.exp), r.next().int(t.t));
|
||||
try std.testing.expectEqual(0, r.buf.len);
|
||||
}
|
||||
}
|
||||
|
||||
test "CBOR string parsing" {
|
||||
var r = CborReader{.buf="\x40"};
|
||||
try std.testing.expectEqualStrings("", r.next().bytes());
|
||||
r.buf = "\x45\x00\x01\x02\x03\x04x";
|
||||
try std.testing.expectEqualStrings("\x00\x01\x02\x03\x04", r.next().bytes());
|
||||
try std.testing.expectEqualStrings("x", r.buf);
|
||||
r.buf = "\x78\x241234567890abcdefghijklmnopqrstuvwxyz-end";
|
||||
try std.testing.expectEqualStrings("1234567890abcdefghijklmnopqrstuvwxyz", r.next().bytes());
|
||||
try std.testing.expectEqualStrings("-end", r.buf);
|
||||
}
|
||||
|
||||
test "CBOR skip parsing" {
|
||||
inline for (.{
|
||||
"\x00",
|
||||
"\x40",
|
||||
"\x41a",
|
||||
"\x5f\xff",
|
||||
"\x5f\x41a\xff",
|
||||
"\x80",
|
||||
"\x81\x00",
|
||||
"\x9f\xff",
|
||||
"\x9f\x9f\xff\xff",
|
||||
"\x9f\x9f\x81\x00\xff\xff",
|
||||
"\xa0",
|
||||
"\xa1\x00\x01",
|
||||
"\xbf\xff",
|
||||
"\xbf\xc0\x00\x9f\xff\xff",
|
||||
}) |s| {
|
||||
var r = CborReader{.buf = s ++ "garbage"};
|
||||
r.next().skip();
|
||||
try std.testing.expectEqualStrings(r.buf, "garbage");
|
||||
}
|
||||
}
|
||||
|
||||
const ItemParser = struct {
|
||||
r: CborReader,
|
||||
len: ?u64 = null,
|
||||
|
||||
const Field = struct {
|
||||
key: ItemKey,
|
||||
val: CborVal,
|
||||
};
|
||||
|
||||
fn init(buf: []const u8) ItemParser {
|
||||
var r = ItemParser{.r = .{.buf = buf}};
|
||||
const head = r.r.next();
|
||||
if (head.major != .map) die();
|
||||
if (!head.indef) r.len = head.arg;
|
||||
return r;
|
||||
}
|
||||
|
||||
fn key(r: *ItemParser) ?CborVal {
|
||||
if (r.len) |*l| {
|
||||
if (l.* == 0) return null;
|
||||
l.* -= 1;
|
||||
return r.r.next();
|
||||
} else {
|
||||
const v = r.r.next();
|
||||
return if (v.end()) null else v;
|
||||
}
|
||||
}
|
||||
|
||||
// Skips over any fields that don't fit into an ItemKey.
|
||||
fn next(r: *ItemParser) ?Field {
|
||||
while (r.key()) |k| {
|
||||
if (k.major == .pos and k.arg <= std.math.maxInt(@typeInfo(ItemKey).@"enum".tag_type)) return .{
|
||||
.key = @enumFromInt(k.arg),
|
||||
.val = r.r.next(),
|
||||
} else {
|
||||
k.skip();
|
||||
r.r.next().skip();
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
// Returned buffer is valid until the next readItem().
|
||||
fn readItem(ref: u64) ItemParser {
|
||||
global.lastitem = ref;
|
||||
if (ref >= (1 << (24 + 32))) die();
|
||||
const block = readBlock(@intCast(ref >> 24));
|
||||
if ((ref & 0xffffff) >= block.len) die();
|
||||
return ItemParser.init(block[@intCast(ref & 0xffffff)..]);
|
||||
}
|
||||
|
||||
const Import = struct {
|
||||
sink: *sink.Thread,
|
||||
stat: sink.Stat = .{},
|
||||
fields: Fields = .{},
|
||||
p: ItemParser = undefined,
|
||||
|
||||
const Fields = struct {
|
||||
name: []const u8 = "",
|
||||
rderr: bool = false,
|
||||
prev: ?u64 = null,
|
||||
sub: ?u64 = null,
|
||||
};
|
||||
|
||||
fn readFields(ctx: *Import, ref: u64) void {
|
||||
ctx.p = readItem(ref);
|
||||
var hastype = false;
|
||||
|
||||
while (ctx.p.next()) |kv| switch (kv.key) {
|
||||
.type => {
|
||||
ctx.stat.etype = kv.val.etype();
|
||||
hastype = true;
|
||||
},
|
||||
.name => ctx.fields.name = kv.val.bytes(),
|
||||
.prev => ctx.fields.prev = kv.val.itemref(ref),
|
||||
.asize => ctx.stat.size = kv.val.int(u64),
|
||||
.dsize => ctx.stat.blocks = @intCast(kv.val.int(u64)/512),
|
||||
.dev => ctx.stat.dev = kv.val.int(u64),
|
||||
.rderr => ctx.fields.rderr = kv.val.isTrue(),
|
||||
.sub => ctx.fields.sub = kv.val.itemref(ref),
|
||||
.ino => ctx.stat.ino = kv.val.int(u64),
|
||||
.nlink => ctx.stat.nlink = kv.val.int(u31),
|
||||
.uid => { ctx.stat.ext.uid = kv.val.int(u32); ctx.stat.ext.pack.hasuid = true; },
|
||||
.gid => { ctx.stat.ext.gid = kv.val.int(u32); ctx.stat.ext.pack.hasgid = true; },
|
||||
.mode => { ctx.stat.ext.mode = kv.val.int(u16); ctx.stat.ext.pack.hasmode = true; },
|
||||
.mtime => { ctx.stat.ext.mtime = kv.val.int(u64); ctx.stat.ext.pack.hasmtime = true; },
|
||||
else => kv.val.skip(),
|
||||
};
|
||||
|
||||
if (!hastype) die();
|
||||
if (ctx.fields.name.len == 0) die();
|
||||
}
|
||||
|
||||
fn import(ctx: *Import, ref: u64, parent: ?*sink.Dir, dev: u64) void {
|
||||
ctx.stat = .{ .dev = dev };
|
||||
ctx.fields = .{};
|
||||
ctx.readFields(ref);
|
||||
|
||||
if (ctx.stat.etype == .dir) {
|
||||
const prev = ctx.fields.prev;
|
||||
const dir =
|
||||
if (parent) |d| d.addDir(ctx.sink, ctx.fields.name, &ctx.stat)
|
||||
else sink.createRoot(ctx.fields.name, &ctx.stat);
|
||||
ctx.sink.setDir(dir);
|
||||
if (ctx.fields.rderr) dir.setReadError(ctx.sink);
|
||||
|
||||
ctx.fields.prev = ctx.fields.sub;
|
||||
while (ctx.fields.prev) |n| ctx.import(n, dir, ctx.stat.dev);
|
||||
|
||||
ctx.sink.setDir(parent);
|
||||
dir.unref(ctx.sink);
|
||||
ctx.fields.prev = prev;
|
||||
|
||||
} else {
|
||||
const p = parent orelse die();
|
||||
if (@intFromEnum(ctx.stat.etype) < 0)
|
||||
p.addSpecial(ctx.sink, ctx.fields.name, ctx.stat.etype)
|
||||
else
|
||||
p.addStat(ctx.sink, ctx.fields.name, &ctx.stat);
|
||||
}
|
||||
|
||||
if ((ctx.sink.files_seen.load(.monotonic) & 65) == 0)
|
||||
main.handleEvent(false, false);
|
||||
}
|
||||
};
|
||||
|
||||
// Resolve an itemref and return a newly allocated entry.
|
||||
// Dir.parent and Link.next/prev are left uninitialized.
|
||||
pub fn get(ref: u64, alloc: std.mem.Allocator) *model.Entry {
|
||||
const parser = readItem(ref);
|
||||
|
||||
var etype: ?model.EType = null;
|
||||
var name: []const u8 = "";
|
||||
var p = parser;
|
||||
var ext = model.Ext{};
|
||||
while (p.next()) |kv| {
|
||||
switch (kv.key) {
|
||||
.type => etype = kv.val.etype(),
|
||||
.name => name = kv.val.bytes(),
|
||||
.uid => { ext.uid = kv.val.int(u32); ext.pack.hasuid = true; },
|
||||
.gid => { ext.gid = kv.val.int(u32); ext.pack.hasgid = true; },
|
||||
.mode => { ext.mode = kv.val.int(u16); ext.pack.hasmode = true; },
|
||||
.mtime => { ext.mtime = kv.val.int(u64); ext.pack.hasmtime = true; },
|
||||
else => kv.val.skip(),
|
||||
}
|
||||
}
|
||||
if (etype == null or name.len == 0) die();
|
||||
|
||||
var entry = model.Entry.create(alloc, etype.?, main.config.extended and !ext.isEmpty(), name);
|
||||
entry.next = .{ .ref = std.math.maxInt(u64) };
|
||||
if (entry.ext()) |e| e.* = ext;
|
||||
if (entry.dir()) |d| d.sub = .{ .ref = std.math.maxInt(u64) };
|
||||
p = parser;
|
||||
while (p.next()) |kv| switch (kv.key) {
|
||||
.prev => entry.next = .{ .ref = kv.val.itemref(ref) },
|
||||
.asize => { if (entry.pack.etype != .dir) entry.size = kv.val.int(u64); },
|
||||
.dsize => { if (entry.pack.etype != .dir) entry.pack.blocks = @intCast(kv.val.int(u64)/512); },
|
||||
|
||||
.rderr => { if (entry.dir()) |d| {
|
||||
if (kv.val.isTrue()) d.pack.err = true
|
||||
else d.pack.suberr = true;
|
||||
} },
|
||||
.dev => { if (entry.dir()) |d| d.pack.dev = model.devices.getId(kv.val.int(u64)); },
|
||||
.cumasize => entry.size = kv.val.int(u64),
|
||||
.cumdsize => entry.pack.blocks = @intCast(kv.val.int(u64)/512),
|
||||
.shrasize => { if (entry.dir()) |d| d.shared_size = kv.val.int(u64); },
|
||||
.shrdsize => { if (entry.dir()) |d| d.shared_blocks = kv.val.int(u64)/512; },
|
||||
.items => { if (entry.dir()) |d| d.items = util.castClamp(u32, kv.val.int(u64)); },
|
||||
.sub => { if (entry.dir()) |d| d.sub = .{ .ref = kv.val.itemref(ref) }; },
|
||||
|
||||
.ino => { if (entry.link()) |l| l.ino = kv.val.int(u64); },
|
||||
.nlink => { if (entry.link()) |l| l.pack.nlink = kv.val.int(u31); },
|
||||
else => kv.val.skip(),
|
||||
};
|
||||
return entry;
|
||||
}
|
||||
|
||||
pub fn getRoot() u64 {
|
||||
return bigu64(global.index[global.index.len-8..][0..8].*);
|
||||
}
|
||||
|
||||
// Walk through the directory tree in depth-first order and pass results to sink.zig.
|
||||
// Depth-first is required for JSON export, but more efficient strategies are
|
||||
// possible for other sinks. Parallel import is also an option, but that's more
|
||||
// complex and likely less efficient than a streaming import.
|
||||
pub fn import() void {
|
||||
const sink_threads = sink.createThreads(1);
|
||||
var ctx = Import{.sink = &sink_threads[0]};
|
||||
ctx.import(getRoot(), null, 0);
|
||||
sink.done();
|
||||
}
|
||||
|
||||
// Assumes that the file signature has already been read and validated.
|
||||
pub fn open(fd: std.fs.File) !void {
|
||||
global.fd = fd;
|
||||
|
||||
// Do not use fd.getEndPos() because that requires newer kernels supporting statx() #261.
|
||||
try fd.seekFromEnd(0);
|
||||
const size = try fd.getPos();
|
||||
if (size < 16) return error.EndOfStream;
|
||||
|
||||
// Read index block
|
||||
var buf: [4]u8 = undefined;
|
||||
if (try fd.preadAll(&buf, size - 4) != 4) return error.EndOfStream;
|
||||
const index_header = bigu32(buf);
|
||||
if ((index_header >> 28) != 1 or (index_header & 7) != 0) die();
|
||||
const len = (index_header & 0x0fffffff) - 8; // excluding block header & footer
|
||||
if (len >= size) die();
|
||||
global.index = main.allocator.alloc(u8, len) catch unreachable;
|
||||
if (try fd.preadAll(global.index, size - len - 4) != global.index.len) return error.EndOfStream;
|
||||
}
|
||||
src/browser.c (deleted, 383 lines)
@@ -1,383 +0,0 @@
|
|||
/* ncdu - NCurses Disk Usage
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
#include "ncdu.h"
|
||||
|
||||
struct dir *bcur;
|
||||
int helpwin;
|
||||
|
||||
|
||||
struct dir * removedir(struct dir *dr) {
|
||||
return(dr);
|
||||
}
|
||||
|
||||
int cmp(struct dir *x, struct dir *y) {
|
||||
struct dir *a, *b;
|
||||
int r = 0;
|
||||
|
||||
if(y->flags & FF_PAR)
|
||||
return(1);
|
||||
if(x->flags & FF_PAR)
|
||||
return(-1);
|
||||
|
||||
if(bflags & BF_DESC) {
|
||||
a = y; b = x;
|
||||
} else {
|
||||
b = y; a = x;
|
||||
}
|
||||
if(!(bflags & BF_NDIRF) && y->flags & FF_DIR && !(x->flags & FF_DIR))
|
||||
return(1);
|
||||
if(!(bflags & BF_NDIRF) && !(y->flags & FF_DIR) && x->flags & FF_DIR)
|
||||
return(-1);
|
||||
|
||||
if(bflags & BF_NAME)
|
||||
r = strcmp(a->name, b->name);
|
||||
if(r == 0)
|
||||
r = a->size > b->size ? 1 : (a->size == b->size ? 0 : -1);
|
||||
if(r == 0)
|
||||
r = strcmp(a->name, b->name);
|
||||
return(r);
|
||||
}
|
||||
|
||||
/* Mergesort algorithm, many thanks to
|
||||
http://www.chiark.greenend.org.uk/~sgtatham/algorithms/listsort.html
|
||||
*/
|
||||
struct dir *sortFiles(struct dir *list) {
|
||||
struct dir *p, *q, *e, *tail;
|
||||
int insize, nmerges, psize, qsize, i;
|
||||
|
||||
while(list->prev != NULL)
|
||||
list = list->prev;
|
||||
insize = 1;
|
||||
while(1) {
|
||||
p = list;
|
||||
list = NULL;
|
||||
tail = NULL;
|
||||
nmerges = 0;
|
||||
while(p) {
|
||||
nmerges++;
|
||||
q = p;
|
||||
psize = 0;
|
||||
for(i=0; i<insize; i++) {
|
||||
psize++;
|
||||
q = q->next;
|
||||
if(!q) break;
|
||||
}
|
||||
qsize = insize;
|
||||
while(psize > 0 || (qsize > 0 && q)) {
|
||||
if(psize == 0) {
|
||||
e = q; q = q->next; qsize--;
|
||||
} else if(qsize == 0 || !q) {
|
||||
e = p; p = p->next; psize--;
|
||||
} else if(cmp(p,q) <= 0) {
|
||||
e = p; p = p->next; psize--;
|
||||
} else {
|
||||
e = q; q = q->next; qsize--;
|
||||
}
|
||||
if(tail) tail->next = e;
|
||||
else list = e;
|
||||
e->prev = tail;
|
||||
tail = e;
|
||||
}
|
||||
p = q;
|
||||
}
|
||||
tail->next = NULL;
|
||||
if(nmerges <= 1)
|
||||
return list;
|
||||
insize *= 2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
char graphdat[11];
|
||||
char *graph(off_t max, off_t size) {
|
||||
int i, c = (int) (((float) size / (float) max) * 10.0f);
|
||||
for(i=0; i<10; i++)
|
||||
graphdat[i] = i < c ? '#' : ' ';
|
||||
graphdat[10] = '\0';
|
||||
return graphdat;
|
||||
}
|
||||
|
||||
|
||||
#define exlhid(x) if(bflags & BF_HIDE && (\
|
||||
(!(x->flags & FF_PAR) && x->name[0] == '.')\
|
||||
|| x->flags & FF_EXL)\
|
||||
) { i--; continue; }
|
||||
|
||||
void drawBrowser(int change) {
|
||||
struct dir *n;
|
||||
char tmp[PATH_MAX], ct, dt;
|
||||
int selected, i, o;
|
||||
off_t max;
|
||||
|
||||
erase();
|
||||
|
||||
/* exit if there are no items to display */
|
||||
if(bcur->parent == NULL) {
|
||||
if(bcur->sub == NULL) {
|
||||
erase();
|
||||
refresh();
|
||||
endwin();
|
||||
printf("No items to display...\n");
|
||||
exit(0);
|
||||
} else
|
||||
bcur = bcur->sub;
|
||||
}
|
||||
|
||||
/* create header and status bar */
|
||||
attron(A_REVERSE);
|
||||
mvhline(0, 0, ' ', wincols);
|
||||
mvhline(winrows-1, 0, ' ', wincols);
|
||||
mvprintw(0,0,"%s %s ~ Use the arrow keys to navigate, press ? for help", PACKAGE_NAME, PACKAGE_VERSION);
|
||||
|
||||
mvprintw(winrows-1, 0, " Total size: %s Files: %-6d Dirs: %-6d",
|
||||
cropsize(bcur->parent->size), bcur->parent->files, bcur->parent->dirs);
|
||||
attroff(A_REVERSE);
|
||||
|
||||
mvhline(1, 0, '-', wincols);
|
||||
mvaddstr(1, 3, cropdir(getpath(bcur, tmp), wincols-5));
|
||||
|
||||
/* make sure we have the first item, and the items are in correct order */
|
||||
bcur = sortFiles(bcur);
|
||||
while(bcur->prev != NULL)
|
||||
bcur = bcur->prev;
|
||||
|
||||
/* get maximum size and selected item */
|
||||
for(n = bcur, selected = i = 0; n != NULL; n = n->next, i++) {
|
||||
exlhid(n)
|
||||
if(n->flags & FF_BSEL)
|
||||
selected = i;
|
||||
if(n->size > max)
|
||||
max = n->size;
|
||||
}
|
||||
|
||||
if(selected+change < 0)
|
||||
change -= selected+change;
|
||||
if(selected+change > --i)
|
||||
change -= (selected+change)-i;
|
||||
for(n = bcur, i = 0; n != NULL; n = n->next, i++) {
|
||||
exlhid(n)
|
||||
if(i == selected && n->flags & FF_BSEL)
|
||||
n->flags -= FF_BSEL;
|
||||
if(i == selected+change)
|
||||
n->flags |= FF_BSEL;
|
||||
}
|
||||
selected += change;
|
||||
|
||||
/* determine the listing format */
|
||||
switch(bgraph) {
|
||||
case 0:
|
||||
sprintf(tmp, "%%c %%7s %%c%%-%ds", wincols-12);
|
||||
break;
|
||||
case 1:
|
||||
sprintf(tmp, "%%c %%7s [%%10s] %%c%%-%ds", wincols-24);
|
||||
break;
|
||||
case 2:
|
||||
sprintf(tmp, "%%c %%7s [%%4.1f%%%%] %%c%%-%ds", wincols-19);
|
||||
break;
|
||||
case 3:
|
||||
sprintf(tmp, "%%c %%7s [%%4.1f%%%% %%10s] %%c%%-%ds", wincols-30);
|
||||
}
|
||||
|
||||
/* determine start position */
|
||||
for(n = bcur, i = 0; n != NULL; n = n->next, i++) {
|
||||
exlhid(n)
|
||||
if(i == (selected / (winrows-3)) * (winrows-3))
|
||||
break;
|
||||
}
|
||||
selected -= i;
|
||||
|
||||
/* print the list to the screen */
|
||||
for(i=0; n != NULL && i < winrows-3; n = n->next, i++) {
|
||||
exlhid(n)
|
||||
if(i == selected)
|
||||
attron(A_REVERSE);
|
||||
|
||||
/* reference to parent dir has a different format */
|
||||
if(n->flags & FF_PAR) {
|
||||
mvhline(i+2, 0, ' ', wincols);
|
||||
o = bgraph == 0 ? 11 :
|
||||
bgraph == 1 ? 23 :
|
||||
bgraph == 2 ? 18 :
|
||||
29 ;
|
||||
mvaddstr(i+2, o, "/..");
|
||||
if(i == selected)
|
||||
attroff(A_REVERSE);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* determine indication character */
|
||||
ct = n->flags & FF_EXL ? '<' :
|
||||
n->flags & FF_ERR ? '!' :
|
||||
n->flags & FF_SERR ? '.' :
|
||||
n->flags & FF_OTHFS ? '>' :
|
||||
n->flags & FF_OTHER ? '@' :
|
||||
n->flags & FF_DIR
|
||||
&& n->sub == NULL ? 'e' :
|
||||
' ' ;
|
||||
dt = n->flags & FF_DIR ? '/' : ' ';
|
||||
|
||||
/* format and add item to the list */
|
||||
switch(bgraph) {
|
||||
case 0:
|
||||
mvprintw(i+2, 0, tmp, ct, cropsize(n->size),
|
||||
dt, cropdir(n->name, wincols-12)
|
||||
);
|
||||
break;
|
||||
case 1:
|
||||
mvprintw(i+2, 0, tmp, ct, cropsize(n->size),
|
||||
graph(max, n->size),
|
||||
dt, cropdir(n->name, wincols-24)
|
||||
);
|
||||
break;
|
||||
case 2:
|
||||
mvprintw(i+2, 0, tmp, ct, cropsize(n->size),
|
||||
((float) n->size / (float) n->parent->size) * 100.0f,
|
||||
dt, cropdir(n->name, wincols-19)
|
||||
);
|
||||
break;
|
||||
case 3:
|
||||
mvprintw(i+2, 0, tmp, ct, cropsize(n->size),
|
||||
((float) n->size / (float) n->parent->size) * 100.0f, graph(max, n->size),
|
||||
dt, cropdir(n->name, wincols-30)
|
||||
);
|
||||
}
|
||||
|
||||
if(i == selected)
|
||||
attroff(A_REVERSE);
|
||||
}
|
||||
}
|
||||
|
||||
struct dir * selected(void) {
|
||||
struct dir *n = bcur;
|
||||
do {
|
||||
if(n->flags & FF_BSEL)
|
||||
return n;
|
||||
} while((n = n->next) != NULL);
|
||||
}
|
||||
|
||||
|
||||
#define toggle(x,y) if(x & y) x -=y; else x |= y
|
||||
|
||||
void showBrowser(void) {
|
||||
int ch, change;
|
||||
struct dir *n;
|
||||
|
||||
bcur = dat.sub;
|
||||
bgraph = 1;
|
||||
nodelay(stdscr, 0);
|
||||
bflags = BF_SIZE | BF_DESC | BF_NDIRF;
|
||||
|
||||
drawBrowser(0);
|
||||
refresh();
|
||||
|
||||
while((ch = getch())) {
|
||||
change = 0;
|
||||
switch(ch) {
|
||||
/* selecting items */
|
||||
case KEY_UP:
|
||||
change = -1;
|
||||
break;
|
||||
case KEY_DOWN:
|
||||
change = 1;
|
||||
break;
|
||||
case KEY_HOME:
|
||||
change = -16777216;
|
||||
break;
|
||||
case KEY_LL:
|
||||
case KEY_END:
|
||||
change = 16777216;
|
||||
break;
|
||||
case KEY_PPAGE:
|
||||
change = -1*(winrows-3);
|
||||
break;
|
||||
case KEY_NPAGE:
|
||||
change = winrows-3;
|
||||
break;
|
||||
|
||||
/* sorting items */
|
||||
case 'n':
|
||||
if(bflags & BF_NAME)
|
||||
toggle(bflags, BF_DESC);
|
||||
else
|
||||
bflags = (bflags & BF_HIDE) + (bflags & BF_NDIRF) + BF_NAME;
|
||||
break;
|
||||
case 's':
|
||||
if(bflags & BF_SIZE)
|
||||
toggle(bflags, BF_DESC);
|
||||
else
|
||||
bflags = (bflags & BF_HIDE) + (bflags & BF_NDIRF) + BF_SIZE + BF_DESC;
|
||||
break;
|
||||
case 'p':
|
||||
toggle(sflags, SF_SI);
|
||||
break;
|
||||
case 'h':
|
||||
toggle(bflags, BF_HIDE);
|
||||
break;
|
||||
case 't':
|
||||
toggle(bflags, BF_NDIRF);
|
||||
break;
|
||||
|
||||
/* browsing */
|
||||
case 10:
|
||||
case KEY_RIGHT:
|
||||
n = selected();
|
||||
if(n->flags & FF_PAR)
|
||||
bcur = bcur->parent;
|
||||
else if(n->sub != NULL)
|
||||
bcur = n->sub;
|
||||
break;
|
||||
case KEY_LEFT:
|
||||
if(bcur->parent->parent != NULL) {
|
||||
bcur = bcur->parent;
|
||||
}
|
||||
break;
|
||||
|
||||
/* and other stuff */
|
||||
case KEY_RESIZE:
|
||||
ncresize();
|
||||
break;
|
||||
case 'g':
|
||||
if(++bgraph > 3) bgraph = 0;
|
||||
break;
|
||||
case '?':
|
||||
showHelp();
|
||||
break;
|
||||
case 'd':
|
||||
n = selected();
|
||||
if(!(n->flags & FF_PAR))
|
||||
bcur = showDelete(n);
|
||||
break;
|
||||
case 'q':
|
||||
goto endloop;
|
||||
}
|
||||
drawBrowser(change);
|
||||
refresh();
|
||||
}
|
||||
endloop:
|
||||
erase();
|
||||
}
|
||||
|
||||
|
||||
src/browser.zig (new file, 1061 lines; diff suppressed because it is too large)
src/c.zig (new file, 20 lines)
@@ -0,0 +1,20 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pub const c = @cImport({
|
||||
@cDefine("_XOPEN_SOURCE", "1"); // for wcwidth()
|
||||
@cInclude("stdio.h"); // fopen(), used to initialize ncurses
|
||||
@cInclude("string.h"); // strerror()
|
||||
@cInclude("time.h"); // strftime()
|
||||
@cInclude("wchar.h"); // wcwidth()
|
||||
@cInclude("locale.h"); // setlocale() and localeconv()
|
||||
@cInclude("fnmatch.h"); // fnmatch()
|
||||
@cInclude("unistd.h"); // getuid()
|
||||
@cInclude("sys/types.h"); // struct passwd
|
||||
@cInclude("pwd.h"); // getpwnam(), getpwuid()
|
||||
if (@import("builtin").os.tag == .linux) {
|
||||
@cInclude("sys/vfs.h"); // statfs()
|
||||
}
|
||||
@cInclude("curses.h");
|
||||
@cInclude("zstd.h");
|
||||
});
|
||||
src/calc.c (deleted, 358 lines)
@@ -1,358 +0,0 @@
|
|||
/* ncdu - NCurses Disk Usage
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
#include "ncdu.h"
|
||||
|
||||
|
||||
/* My own implementation of realpath()
|
||||
- assumes that *every* possible path fits in PATH_MAX bytes
|
||||
- does not set errno on error
|
||||
- has not yet been fully tested
|
||||
*/
|
||||
char *rpath(const char *from, char *to) {
|
||||
char tmp[PATH_MAX], cwd[PATH_MAX], cur[PATH_MAX], app[PATH_MAX];
|
||||
int i, j, l, k, last, ll = 0;
|
||||
struct stat st;
|
||||
|
||||
getcwd(cwd, PATH_MAX);
|
||||
strcpy(cur, from);
|
||||
app[0] = 0;
|
||||
|
||||
loop:
|
||||
/* not an absolute path, add current directory */
|
||||
if(cur[0] != '/') {
|
||||
if(!(cwd[0] == '/' && cwd[1] == 0))
|
||||
strcpy(tmp, cwd);
|
||||
else
|
||||
tmp[0] = 0;
|
||||
strcat(tmp, "/");
|
||||
strcat(tmp, cur);
|
||||
} else
|
||||
strcpy(tmp, cur);
|
||||
|
||||
/* now fix things like '.' and '..' */
|
||||
i = j = last = 0;
|
||||
l = strlen(tmp);
|
||||
while(1) {
|
||||
if(tmp[i] == 0)
|
||||
break;
|
||||
/* . */
|
||||
if(l >= i+2 && tmp[i] == '/' && tmp[i+1] == '.' && (tmp[i+2] == 0 || tmp[i+2] == '/')) {
|
||||
i+= 2;
|
||||
continue;
|
||||
}
|
||||
/* .. */
|
||||
if(l >= i+3 && tmp[i] == '/' && tmp[i+1] == '.' && tmp[i+2] == '.' && (tmp[i+3] == 0 || tmp[i+3] == '/')) {
|
||||
for(k=j; --k>0;)
|
||||
if(to[k] == '/' && k != j-1)
|
||||
break;
|
||||
j -= j-k;
|
||||
if(j < 1) j = 1;
|
||||
i += 3;
|
||||
continue;
|
||||
}
|
||||
/* remove double slashes */
|
||||
if(tmp[i] == '/' && i>0 && tmp[i-1] == '/') {
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
to[j++] = tmp[i++];
|
||||
}
|
||||
/* remove leading slashes */
|
||||
while(--j > 0) {
|
||||
if(to[j] != '/')
|
||||
break;
|
||||
}
|
||||
to[j+1] = 0;
|
||||
/* append 'app' */
|
||||
if(app[0] != 0)
|
||||
strcat(to, app);
|
||||
|
||||
j = strlen(to);
|
||||
/* check for symlinks */
|
||||
for(i=1; i<=j; i++) {
|
||||
if(to[i] == '/' || to[i] == 0) {
|
||||
strncpy(tmp, to, i);
|
||||
tmp[i] = 0;
|
||||
if(lstat(tmp, &st) < 0)
|
||||
return(NULL);
|
||||
if(S_ISLNK(st.st_mode)) {
|
||||
if(++ll > LINK_MAX || (k = readlink(tmp, cur, PATH_MAX)) < 0)
|
||||
return(NULL);
|
||||
cur[k] = 0;
|
||||
if(to[i] != 0)
|
||||
strcpy(app, &to[i]);
|
||||
strcpy(cwd, tmp);
|
||||
for(k=strlen(cwd); --k>0;)
|
||||
if(cwd[k] == '/')
|
||||
break;
|
||||
cwd[k] = 0;
|
||||
goto loop;
|
||||
}
|
||||
if(!S_ISDIR(st.st_mode))
|
||||
return(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
return(to);
|
||||
}
|
||||
|
||||
|
||||
WINDOW* calcWin() {
|
||||
WINDOW *calc;
|
||||
calc = newwin(10, 60, winrows/2 - 5, wincols/2 - 30);
|
||||
keypad(calc, TRUE);
|
||||
box(calc, 0, 0);
|
||||
wattron(calc, A_BOLD);
|
||||
mvwaddstr(calc, 0, 4, "Calculating...");
|
||||
wattroff(calc, A_BOLD);
|
||||
mvwaddstr(calc, 2, 2, "Total files:");
|
||||
mvwaddstr(calc, 2, 24, "dirs:");
|
||||
mvwaddstr(calc, 2, 39, "size:");
|
||||
mvwaddstr(calc, 3, 2, "Current dir:");
|
||||
mvwaddstr(calc, 8, 43, "Press q to quit");
|
||||
return(calc);
|
||||
}
|
||||
|
||||
int calcUsage() {
|
||||
WINDOW *calc;
|
||||
DIR *dir;
|
||||
char antext[15] = "Calculating...";
|
||||
int ch, anpos = 0, level = 0, i, cdir1len, namelen;
|
||||
char cdir[PATH_MAX], emsg[PATH_MAX], tmp[PATH_MAX], err = 0, *f,
|
||||
*cdir1, direrr, staterr;
|
||||
dev_t dev = (dev_t) NULL;
|
||||
struct dirent *dr;
|
||||
struct stat fs;
|
||||
struct dir *d, *dirs[512]; /* 512 recursive directories should be enough for everyone! */
|
||||
struct timeval tv; suseconds_t l;
|
||||
gettimeofday(&tv, (void *)NULL);
|
||||
l = (1000*(tv.tv_sec % 1000) + (tv.tv_usec / 1000)) / sdelay - 1;
|
||||
|
||||
calc = calcWin();
|
||||
wrefresh(calc);
|
||||
|
||||
memset(dirs, 0, sizeof(struct dir *)*512);
|
||||
level = 0;
|
||||
dirs[level] = &dat;
|
||||
memset(&dat, 0, sizeof(dat));
|
||||
|
||||
if(rpath(sdir, tmp) == NULL || stat(tmp, &fs) != 0 || !S_ISDIR(fs.st_mode)) {
|
||||
mvwaddstr(calc, 8, 1, " ");
|
||||
wattron(calc, A_BOLD);
|
||||
mvwaddstr(calc, 5, 2, "Error:");
|
||||
wattroff(calc, A_BOLD);
|
||||
mvwprintw(calc, 5, 9, "could not open %s", cropdir(tmp, 34));
|
||||
mvwaddstr(calc, 6, 3, "press any key to continue...");
|
||||
wrefresh(calc);
|
||||
wgetch(calc);
|
||||
delwin(calc);
|
||||
return(1);
|
||||
}
|
||||
if(sflags & SF_AS) dat.size = fs.st_size;
|
||||
else dat.size = fs.st_blocks * 512;
|
||||
if(sflags & SF_SMFS) dev = fs.st_dev;
|
||||
dat.name = malloc(strlen(tmp)+1);
|
||||
strcpy(dat.name, tmp);
|
||||
|
||||
nodelay(calc, 1);
|
||||
/* main loop */
|
||||
while((ch = wgetch(calc)) != 'q') {
|
||||
direrr = staterr = 0;
|
||||
cdir1 = cdir;
|
||||
|
||||
if(ch == KEY_RESIZE) {
|
||||
delwin(calc);
|
||||
ncresize();
|
||||
calc = calcWin();
|
||||
nodelay(calc, 1);
|
||||
erase();
|
||||
refresh();
|
||||
}
|
||||
|
||||
/* calculate full path of the dir */
|
||||
cdir[0] = '\0';
|
||||
for(i=0; i<=level; i++) {
|
||||
if(i > 0 && !(i == 1 && dat.name[strlen(dat.name)-1] == '/')) strcat(cdir, "/");
|
||||
strcat(cdir, dirs[i]->name);
|
||||
}
|
||||
/* avoid lstat("//name", .) -- Linux:OK, Cygwin:UNC path, POSIX:Implementation-defined */
|
||||
if(cdir[0] == '/' && cdir[1] == '\0')
|
||||
cdir1++;
|
||||
cdir1len = strlen(cdir1);
|
||||
/* opendir */
|
||||
if((dir = opendir(cdir)) == NULL) {
|
||||
dirs[level]->flags |= FF_ERR;
|
||||
for(i=level; i-->0;)
|
||||
dirs[i]->flags |= FF_SERR;
|
||||
err = 1;
|
||||
strcpy(emsg, cdir);
|
||||
dirs[++level] = NULL;
|
||||
goto showstatus;
|
||||
}
|
||||
dirs[++level] = NULL;
|
||||
/* readdir */
|
||||
errno = 0;
|
||||
while((dr = readdir(dir)) != NULL) {
|
||||
f = dr->d_name;
|
||||
if(f[0] == '.' && f[1] == '\0')
|
||||
continue;
|
||||
if(f[0] == '.' && f[1] == '.' && f[2] == '\0' && level == 1)
|
||||
continue;
|
||||
namelen = strlen(f);
|
||||
if(cdir1len+namelen+1 >= PATH_MAX) {
|
||||
direrr = 1;
|
||||
errno = 0;
|
||||
continue;
|
||||
}
|
||||
d = calloc(sizeof(struct dir), 1);
|
||||
d->name = malloc(namelen+1);
|
||||
strcpy(d->name, f);
|
||||
if(dirs[level] != NULL) dirs[level]->next = d;
|
||||
d->prev = dirs[level];
|
||||
d->parent = dirs[level-1];
|
||||
dirs[level-1]->sub = d;
|
||||
dirs[level] = d;
|
||||
/* reference to parent directory, no need to stat */
|
||||
if(f[0] == '.' && f[1] == '.' && f[2] == '\0') {
|
||||
d->flags = FF_PAR;
|
||||
continue;
|
||||
}
|
||||
sprintf(tmp, "%s/%s", cdir1, d->name);
|
||||
/* stat */
|
||||
if(lstat(tmp, &fs)) {
|
||||
staterr = 1;
|
||||
d->flags = FF_ERR;
|
||||
errno = 0;
|
||||
continue;
|
||||
}
|
||||
/* check if file/dir is excluded */
|
||||
if(matchExclude(tmp))
|
||||
d->flags = FF_EXL;
|
||||
/* check filetype */
|
||||
if(sflags & SF_SMFS && dev != fs.st_dev)
|
||||
d->flags |= FF_OTHFS;
|
||||
if(S_ISREG(fs.st_mode)) {
|
||||
d->flags |= FF_FILE;
|
||||
if(!(d->flags & FF_EXL))
|
||||
for(i=level; i-->0;)
|
||||
dirs[i]->files++;
|
||||
} else if(S_ISDIR(fs.st_mode)) {
|
||||
d->flags |= FF_DIR;
|
||||
if(!(d->flags & FF_EXL))
|
||||
for(i=level; i-->0;)
|
||||
dirs[i]->dirs++;
|
||||
} else
|
||||
d->flags |= FF_OTHER;
|
||||
if(d->flags & FF_EXL)
|
||||
continue;
|
||||
/* count size */
|
||||
if(sflags & SF_AS)
|
||||
d->size = fs.st_size;
|
||||
else
|
||||
d->size = fs.st_blocks * 512;
|
||||
if(d->flags & FF_OTHFS) d->size = 0;
|
||||
for(i=level; i-->0;)
|
||||
dirs[i]->size += d->size;
|
||||
errno = 0;
|
||||
}
|
||||
/* empty dir - remove the reference to the parent dir */
|
||||
if(dirs[level]->next == NULL && dirs[level]->prev == NULL) {
|
||||
dirs[level]->parent->sub = NULL;
|
||||
free(dirs[level]->name);
|
||||
free(dirs[level]);
|
||||
dirs[level] = NULL;
|
||||
}
|
||||
/* check for errors */
|
||||
if(errno)
|
||||
direrr = 1;
|
||||
closedir(dir);
|
||||
if(direrr || staterr) {
|
||||
dirs[level-1]->flags |= (direrr ? FF_ERR : FF_SERR);
|
||||
for(i=level-1; i-->0;)
|
||||
dirs[i]->flags |= FF_SERR;
|
||||
}
|
||||
|
||||
/* show progress status */
|
||||
showstatus:
|
||||
gettimeofday(&tv, (void *)NULL);
|
||||
tv.tv_usec = (1000*(tv.tv_sec % 1000) + (tv.tv_usec / 1000)) / sdelay;
|
||||
if(l == tv.tv_usec) goto newdir;
|
||||
mvwprintw(calc, 3, 15, "%-43s", cropdir(cdir, 43));
|
||||
mvwprintw(calc, 2, 15, "%d", dat.files);
|
||||
mvwprintw(calc, 2, 30, "%d", dat.dirs);
|
||||
mvwaddstr(calc, 2, 45, cropsize(dat.size));
|
||||
|
||||
if(err == 1) {
|
||||
wattron(calc, A_BOLD);
|
||||
mvwaddstr(calc, 5, 2, "Warning:");
|
||||
wattroff(calc, A_BOLD);
|
||||
mvwprintw(calc, 5, 11, "could not open %-32s", cropdir(emsg, 32));
|
||||
mvwaddstr(calc, 6, 3, "some directory sizes may not be correct");
|
||||
}
|
||||
|
||||
/* animation */
|
||||
if(sdelay < 1000) {
|
||||
if(++anpos == 28) anpos = 0;
|
||||
mvwaddstr(calc, 8, 3, " ");
|
||||
if(anpos < 14)
|
||||
for(i=0; i<=anpos; i++)
|
||||
mvwaddch(calc, 8, i+3, antext[i]);
|
||||
else
|
||||
for(i=13; i>anpos-14; i--)
|
||||
mvwaddch(calc, 8, i+3, antext[i]);
|
||||
} else
|
||||
mvwaddstr(calc, 8, 3, antext);
|
||||
wmove(calc, 8, 58);
|
||||
wrefresh(calc);
|
||||
|
||||
|
||||
newdir:
|
||||
l = tv.tv_usec;
|
||||
/* select new directory */
|
||||
while(dirs[level] == NULL || !(dirs[level]->flags & FF_DIR) || dirs[level]->flags & FF_OTHFS || dirs[level]->flags & FF_EXL) {
|
||||
if(dirs[level] != NULL && dirs[level]->prev != NULL)
|
||||
dirs[level] = dirs[level]->prev;
|
||||
else {
|
||||
while(level-- > 0) {
|
||||
if(dirs[level]->prev != NULL) {
|
||||
dirs[level] = dirs[level]->prev;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(level < 1) goto endloop;
|
||||
}
|
||||
}
|
||||
}
|
||||
endloop:
|
||||
nodelay(calc, 0);
|
||||
delwin(calc);
|
||||
erase();
|
||||
refresh();
|
||||
if(ch == 'q')
|
||||
return(2);
|
||||
return(0);
|
||||
}
|
||||
|
||||
242  src/delete.c
@@ -1,242 +0,0 @@
/* ncdu - NCurses Disk Usage
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
#include "ncdu.h"
|
||||
|
||||
suseconds_t lastupdate;
|
||||
|
||||
|
||||
void drawConfirm(struct dir *del, int sel) {
|
||||
WINDOW *cfm;
|
||||
|
||||
cfm = newwin(6, 60, winrows/2-3, wincols/2-30);
|
||||
box(cfm, 0, 0);
|
||||
wattron(cfm, A_BOLD);
|
||||
mvwaddstr(cfm, 0, 4, "Confirm delete");
|
||||
wattroff(cfm, A_BOLD);
|
||||
|
||||
mvwprintw(cfm, 1, 2, "Are you sure you want to delete \"%s\"%c",
|
||||
cropdir(del->name, 21), del->flags & FF_DIR ? ' ' : '?');
|
||||
if(del->flags & FF_DIR)
|
||||
mvwprintw(cfm, 2, 18, "and all of its contents?");
|
||||
|
||||
if(sel == 0)
|
||||
wattron(cfm, A_REVERSE);
|
||||
mvwaddstr(cfm, 4, 15, "yes");
|
||||
wattroff(cfm, A_REVERSE);
|
||||
if(sel == 1)
|
||||
wattron(cfm, A_REVERSE);
|
||||
mvwaddstr(cfm, 4, 24, "no");
|
||||
wattroff(cfm, A_REVERSE);
|
||||
if(sel == 2)
|
||||
wattron(cfm, A_REVERSE);
|
||||
mvwaddstr(cfm, 4, 31, "don't ask me again");
|
||||
wattroff(cfm, A_REVERSE);
|
||||
|
||||
wrefresh(cfm);
|
||||
delwin(cfm);
|
||||
}
|
||||
|
||||
|
||||
/* show progress */
|
||||
void drawProgress(char *file) {
|
||||
WINDOW *prg;
|
||||
|
||||
prg = newwin(6, 60, winrows/2-3, wincols/2-30);
|
||||
nodelay(prg, 1);
|
||||
box(prg, 0, 0);
|
||||
wattron(prg, A_BOLD);
|
||||
mvwaddstr(prg, 0, 4, "Deleting...");
|
||||
wattroff(prg, A_BOLD);
|
||||
|
||||
mvwaddstr(prg, 1, 2, cropdir(file, 47));
|
||||
mvwaddstr(prg, 5, 41, "Press q to abort");
|
||||
|
||||
wrefresh(prg);
|
||||
delwin(prg);
|
||||
}
|
||||
|
||||
|
||||
/* show error dialog */
|
||||
void drawError(int sel, char *file) {
|
||||
WINDOW *err;
|
||||
|
||||
err = newwin(6, 60, winrows/2-3, wincols/2-30);
|
||||
box(err, 0, 0);
|
||||
wattron(err, A_BOLD);
|
||||
mvwaddstr(err, 0, 4, "Error!");
|
||||
wattroff(err, A_BOLD);
|
||||
|
||||
mvwprintw(err, 1, 2, "Can't delete %s:", cropdir(file, 42));
|
||||
mvwaddstr(err, 2, 4, strerror(errno));
|
||||
|
||||
if(sel == 0)
|
||||
wattron(err, A_REVERSE);
|
||||
mvwaddstr(err, 4, 14, "abort");
|
||||
wattroff(err, A_REVERSE);
|
||||
if(sel == 1)
|
||||
wattron(err, A_REVERSE);
|
||||
mvwaddstr(err, 4, 23, "ignore");
|
||||
wattroff(err, A_REVERSE);
|
||||
if(sel == 2)
|
||||
wattron(err, A_REVERSE);
|
||||
mvwaddstr(err, 4, 33, "ignore all");
|
||||
wattroff(err, A_REVERSE);
|
||||
|
||||
wrefresh(err);
|
||||
delwin(err);
|
||||
}
|
||||
|
||||
|
||||
struct dir *deleteDir(struct dir *dr) {
|
||||
struct dir *nxt, *cur;
|
||||
int ch, sel = 0;
|
||||
char file[PATH_MAX];
|
||||
struct timeval tv;
|
||||
|
||||
getpath(dr, file);
|
||||
strcat(file, "/");
|
||||
strcat(file, dr->name);
|
||||
|
||||
/* check for input or screen resizes */
|
||||
nodelay(stdscr, 1);
|
||||
while((ch = getch()) != ERR) {
|
||||
if(ch == 'q')
|
||||
return(NULL);
|
||||
if(ch == KEY_RESIZE) {
|
||||
ncresize();
|
||||
drawBrowser(0);
|
||||
drawProgress(file);
|
||||
}
|
||||
}
|
||||
nodelay(stdscr, 0);
|
||||
|
||||
/* don't update the screen with shorter intervals than sdelay */
|
||||
gettimeofday(&tv, (void *)NULL);
|
||||
tv.tv_usec = (1000*(tv.tv_sec % 1000) + (tv.tv_usec / 1000)) / sdelay;
|
||||
if(lastupdate != tv.tv_usec) {
|
||||
drawProgress(file);
|
||||
lastupdate = tv.tv_usec;
|
||||
}
|
||||
|
||||
/* do the actual deleting */
|
||||
if(dr->flags & FF_DIR) {
|
||||
if(dr->sub != NULL) {
|
||||
nxt = dr->sub;
|
||||
while(nxt->prev != NULL)
|
||||
nxt = nxt->prev;
|
||||
while(nxt != NULL) {
|
||||
cur = nxt;
|
||||
nxt = cur->next;
|
||||
if(cur->flags & FF_PAR) {
|
||||
freedir(cur);
|
||||
continue;
|
||||
}
|
||||
if(deleteDir(cur) == NULL)
|
||||
return(NULL);
|
||||
}
|
||||
}
|
||||
ch = rmdir(file);
|
||||
} else
|
||||
ch = unlink(file);
|
||||
|
||||
/* error occurred, ask user what to do */
|
||||
if(ch == -1 && !(sflags & SF_IGNE)) {
|
||||
drawError(sel, file);
|
||||
while((ch = getch())) {
|
||||
switch(ch) {
|
||||
case KEY_LEFT:
|
||||
if(--sel < 0)
|
||||
sel = 0;
|
||||
break;
|
||||
case KEY_RIGHT:
|
||||
if(++sel > 2)
|
||||
sel = 2;
|
||||
break;
|
||||
case 10:
|
||||
if(sel == 0)
|
||||
return(NULL);
|
||||
if(sel == 2)
|
||||
sflags |= SF_IGNE;
|
||||
goto ignore;
|
||||
case 'q':
|
||||
return(NULL);
|
||||
case KEY_RESIZE:
|
||||
ncresize();
|
||||
drawBrowser(0);
|
||||
break;
|
||||
}
|
||||
drawError(sel, file);
|
||||
}
|
||||
};
|
||||
ignore:
|
||||
|
||||
return(freedir(dr));
|
||||
}
|
||||
|
||||
|
||||
struct dir *showDelete(struct dir *dr) {
|
||||
int ch, sel = 1;
|
||||
struct dir *ret;
|
||||
|
||||
/* confirm */
|
||||
if(sflags & SF_NOCFM)
|
||||
goto doit;
|
||||
|
||||
drawConfirm(dr, sel);
|
||||
while((ch = getch())) {
|
||||
switch(ch) {
|
||||
case KEY_LEFT:
|
||||
if(--sel < 0)
|
||||
sel = 0;
|
||||
break;
|
||||
case KEY_RIGHT:
|
||||
if(++sel > 2)
|
||||
sel = 2;
|
||||
break;
|
||||
case 10:
|
||||
if(sel == 1)
|
||||
return(dr);
|
||||
if(sel == 2)
|
||||
sflags |= SF_NOCFM;
|
||||
goto doit;
|
||||
case 'q':
|
||||
return(dr);
|
||||
case KEY_RESIZE:
|
||||
ncresize();
|
||||
drawBrowser(0);
|
||||
break;
|
||||
}
|
||||
drawConfirm(dr, sel);
|
||||
}
|
||||
|
||||
doit:
|
||||
lastupdate = 999; /* just some random high value as initialisation */
|
||||
|
||||
ret = deleteDir(dr);
|
||||
|
||||
return(ret == NULL ? dr : ret);
|
||||
}
|
||||
|
||||
301  src/delete.zig (new file)
@@ -0,0 +1,301 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const model = @import("model.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const browser = @import("browser.zig");
|
||||
const scan = @import("scan.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const mem_sink = @import("mem_sink.zig");
|
||||
const util = @import("util.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
var parent: *model.Dir = undefined;
|
||||
var entry: *model.Entry = undefined;
|
||||
var next_sel: ?*model.Entry = undefined; // Which item to select if deletion succeeds
|
||||
var state: enum { confirm, busy, err } = .confirm;
|
||||
var confirm: enum { yes, no, ignore } = .no;
|
||||
var error_option: enum { abort, ignore, all } = .abort;
|
||||
var error_code: anyerror = undefined;
|
||||
|
||||
pub fn setup(p: *model.Dir, e: *model.Entry, n: ?*model.Entry) void {
|
||||
parent = p;
|
||||
entry = e;
|
||||
next_sel = n;
|
||||
state = if (main.config.confirm_delete) .confirm else .busy;
|
||||
confirm = .no;
|
||||
}
|
||||
|
||||
|
||||
// Returns true to abort scanning.
|
||||
fn err(e: anyerror) bool {
|
||||
if (main.config.ignore_delete_errors)
|
||||
return false;
|
||||
error_code = e;
|
||||
state = .err;
|
||||
|
||||
while (main.state == .delete and state == .err)
|
||||
main.handleEvent(true, false);
|
||||
|
||||
return main.state != .delete;
|
||||
}
|
||||
|
||||
fn deleteItem(dir: std.fs.Dir, path: [:0]const u8, ptr: *align(1) ?*model.Entry) bool {
|
||||
entry = ptr.*.?;
|
||||
main.handleEvent(false, false);
|
||||
if (main.state != .delete)
|
||||
return true;
|
||||
|
||||
if (entry.dir()) |d| {
|
||||
var fd = dir.openDirZ(path, .{ .no_follow = true, .iterate = false }) catch |e| return err(e);
|
||||
var it = &d.sub.ptr;
|
||||
parent = d;
|
||||
defer parent = parent.parent.?;
|
||||
while (it.*) |n| {
|
||||
if (deleteItem(fd, n.name(), it)) {
|
||||
fd.close();
|
||||
return true;
|
||||
}
|
||||
if (it.* == n) // item deletion failed, make sure to still advance to next
|
||||
it = &n.next.ptr;
|
||||
}
|
||||
fd.close();
|
||||
dir.deleteDirZ(path) catch |e|
|
||||
return if (e != error.DirNotEmpty or d.sub.ptr == null) err(e) else false;
|
||||
} else
|
||||
dir.deleteFileZ(path) catch |e| return err(e);
|
||||
ptr.*.?.zeroStats(parent);
|
||||
ptr.* = ptr.*.?.next.ptr;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Returns true if the item has been deleted successfully.
|
||||
fn deleteCmd(path: [:0]const u8, ptr: *align(1) ?*model.Entry) bool {
|
||||
{
|
||||
var env = std.process.getEnvMap(main.allocator) catch unreachable;
|
||||
defer env.deinit();
|
||||
env.put("NCDU_DELETE_PATH", path) catch unreachable;
|
||||
|
||||
// Since we're passing the path as an environment variable and go through
|
||||
// the shell anyway, we can refer to the variable and avoid error-prone
|
||||
// shell escaping.
|
||||
const cmd = std.fmt.allocPrint(main.allocator, "{s} \"$NCDU_DELETE_PATH\"", .{main.config.delete_command}) catch unreachable;
|
||||
defer main.allocator.free(cmd);
|
||||
ui.runCmd(&.{"/bin/sh", "-c", cmd}, null, &env, true);
|
||||
}
|
||||
|
||||
const stat = scan.statAt(std.fs.cwd(), path, false, null) catch {
|
||||
// Stat failed. Would be nice to display an error if it's not
|
||||
// 'FileNotFound', but w/e, let's just assume the item has been
|
||||
// deleted as expected.
|
||||
ptr.*.?.zeroStats(parent);
|
||||
ptr.* = ptr.*.?.next.ptr;
|
||||
return true;
|
||||
};
|
||||
|
||||
// If either old or new entry is not a dir, remove & re-add entry in the in-memory tree.
|
||||
if (ptr.*.?.pack.etype != .dir or stat.etype != .dir) {
|
||||
ptr.*.?.zeroStats(parent);
|
||||
const e = model.Entry.create(main.allocator, stat.etype, main.config.extended and !stat.ext.isEmpty(), ptr.*.?.name());
|
||||
e.next.ptr = ptr.*.?.next.ptr;
|
||||
mem_sink.statToEntry(&stat, e, parent);
|
||||
ptr.* = e;
|
||||
|
||||
var it : ?*model.Dir = parent;
|
||||
while (it) |p| : (it = p.parent) {
|
||||
if (stat.etype != .link) {
|
||||
p.entry.pack.blocks +|= e.pack.blocks;
|
||||
p.entry.size +|= e.size;
|
||||
}
|
||||
p.items +|= 1;
|
||||
}
|
||||
}
|
||||
|
||||
// If new entry is a dir, recursively scan.
|
||||
if (ptr.*.?.dir()) |d| {
|
||||
main.state = .refresh;
|
||||
sink.global.sink = .mem;
|
||||
mem_sink.global.root = d;
|
||||
}
|
||||
return false;
|
||||
}
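// Illustration (not part of this commit): with a delete command configured,
// deleteCmd() above effectively runs
//     /bin/sh -c '<delete_command> "$NCDU_DELETE_PATH"'
// with NCDU_DELETE_PATH set in the environment, so the target path itself
// never goes through shell quoting.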
|
||||
|
||||
// Returns the item that should be selected in the browser.
|
||||
pub fn delete() ?*model.Entry {
|
||||
while (main.state == .delete and state == .confirm)
|
||||
main.handleEvent(true, false);
|
||||
if (main.state != .delete)
|
||||
return entry;
|
||||
|
||||
// Find the pointer to this entry
|
||||
const e = entry;
|
||||
var it = &parent.sub.ptr;
|
||||
while (it.*) |n| : (it = &n.next.ptr)
|
||||
if (it.* == entry)
|
||||
break;
|
||||
|
||||
var path: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer path.deinit(main.allocator);
|
||||
parent.fmtPath(main.allocator, true, &path);
|
||||
if (path.items.len == 0 or path.items[path.items.len-1] != '/')
|
||||
path.append(main.allocator, '/') catch unreachable;
|
||||
path.appendSlice(main.allocator, entry.name()) catch unreachable;
|
||||
|
||||
if (main.config.delete_command.len == 0) {
|
||||
_ = deleteItem(std.fs.cwd(), util.arrayListBufZ(&path, main.allocator), it);
|
||||
model.inodes.addAllStats();
|
||||
return if (it.* == e) e else next_sel;
|
||||
} else {
|
||||
const isdel = deleteCmd(util.arrayListBufZ(&path, main.allocator), it);
|
||||
model.inodes.addAllStats();
|
||||
return if (isdel) next_sel else it.*;
|
||||
}
|
||||
}
|
||||
|
||||
fn drawConfirm() void {
|
||||
browser.draw();
|
||||
const box = ui.Box.create(6, 60, "Confirm delete");
|
||||
box.move(1, 2);
|
||||
if (main.config.delete_command.len == 0) {
|
||||
ui.addstr("Are you sure you want to delete \"");
|
||||
ui.addstr(ui.shorten(ui.toUtf8(entry.name()), 21));
|
||||
ui.addch('"');
|
||||
if (entry.pack.etype != .dir)
|
||||
ui.addch('?')
|
||||
else {
|
||||
box.move(2, 18);
|
||||
ui.addstr("and all of its contents?");
|
||||
}
|
||||
} else {
|
||||
ui.addstr("Are you sure you want to run \"");
|
||||
ui.addstr(ui.shorten(ui.toUtf8(main.config.delete_command), 25));
|
||||
ui.addch('"');
|
||||
box.move(2, 4);
|
||||
ui.addstr("on \"");
|
||||
ui.addstr(ui.shorten(ui.toUtf8(entry.name()), 50));
|
||||
ui.addch('"');
|
||||
}
|
||||
|
||||
box.move(4, 15);
|
||||
ui.style(if (confirm == .yes) .sel else .default);
|
||||
ui.addstr("yes");
|
||||
|
||||
box.move(4, 25);
|
||||
ui.style(if (confirm == .no) .sel else .default);
|
||||
ui.addstr("no");
|
||||
|
||||
box.move(4, 31);
|
||||
ui.style(if (confirm == .ignore) .sel else .default);
|
||||
ui.addstr("don't ask me again");
|
||||
box.move(4, switch (confirm) {
|
||||
.yes => 15,
|
||||
.no => 25,
|
||||
.ignore => 31
|
||||
});
|
||||
}
|
||||
|
||||
fn drawProgress() void {
|
||||
var path: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer path.deinit(main.allocator);
|
||||
parent.fmtPath(main.allocator, false, &path);
|
||||
path.append(main.allocator, '/') catch unreachable;
|
||||
path.appendSlice(main.allocator, entry.name()) catch unreachable;
|
||||
|
||||
// TODO: Item counts and progress bar would be nice.
|
||||
|
||||
const box = ui.Box.create(6, 60, "Deleting...");
|
||||
box.move(2, 2);
|
||||
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path, main.allocator)), 56));
|
||||
box.move(4, 41);
|
||||
ui.addstr("Press ");
|
||||
ui.style(.key);
|
||||
ui.addch('q');
|
||||
ui.style(.default);
|
||||
ui.addstr(" to abort");
|
||||
}
|
||||
|
||||
fn drawErr() void {
|
||||
var path: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer path.deinit(main.allocator);
|
||||
parent.fmtPath(main.allocator, false, &path);
|
||||
path.append(main.allocator, '/') catch unreachable;
|
||||
path.appendSlice(main.allocator, entry.name()) catch unreachable;
|
||||
|
||||
const box = ui.Box.create(6, 60, "Error");
|
||||
box.move(1, 2);
|
||||
ui.addstr("Error deleting ");
|
||||
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path, main.allocator)), 41));
|
||||
box.move(2, 4);
|
||||
ui.addstr(ui.errorString(error_code));
|
||||
|
||||
box.move(4, 14);
|
||||
ui.style(if (error_option == .abort) .sel else .default);
|
||||
ui.addstr("abort");
|
||||
|
||||
box.move(4, 23);
|
||||
ui.style(if (error_option == .ignore) .sel else .default);
|
||||
ui.addstr("ignore");
|
||||
|
||||
box.move(4, 33);
|
||||
ui.style(if (error_option == .all) .sel else .default);
|
||||
ui.addstr("ignore all");
|
||||
}
|
||||
|
||||
pub fn draw() void {
|
||||
switch (state) {
|
||||
.confirm => drawConfirm(),
|
||||
.busy => drawProgress(),
|
||||
.err => drawErr(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn keyInput(ch: i32) void {
|
||||
switch (state) {
|
||||
.confirm => switch (ch) {
|
||||
'h', c.KEY_LEFT => confirm = switch (confirm) {
|
||||
.ignore => .no,
|
||||
else => .yes,
|
||||
},
|
||||
'l', c.KEY_RIGHT => confirm = switch (confirm) {
|
||||
.yes => .no,
|
||||
else => .ignore,
|
||||
},
|
||||
'q' => main.state = .browse,
|
||||
'\n' => switch (confirm) {
|
||||
.yes => state = .busy,
|
||||
.no => main.state = .browse,
|
||||
.ignore => {
|
||||
main.config.confirm_delete = false;
|
||||
state = .busy;
|
||||
},
|
||||
},
|
||||
else => {}
|
||||
},
|
||||
.busy => {
|
||||
if (ch == 'q')
|
||||
main.state = .browse;
|
||||
},
|
||||
.err => switch (ch) {
|
||||
'h', c.KEY_LEFT => error_option = switch (error_option) {
|
||||
.all => .ignore,
|
||||
else => .abort,
|
||||
},
|
||||
'l', c.KEY_RIGHT => error_option = switch (error_option) {
|
||||
.abort => .ignore,
|
||||
else => .all,
|
||||
},
|
||||
'q' => main.state = .browse,
|
||||
'\n' => switch (error_option) {
|
||||
.abort => main.state = .browse,
|
||||
.ignore => state = .busy,
|
||||
.all => {
|
||||
main.config.ignore_delete_errors = true;
|
||||
state = .busy;
|
||||
},
|
||||
},
|
||||
else => {}
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,96 +0,0 @@
/* ncdu - NCurses Disk Usage
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
#include "ncdu.h"
|
||||
|
||||
|
||||
struct exclude {
|
||||
char *pattern;
|
||||
struct exclude *next;
|
||||
};
|
||||
|
||||
struct exclude *excludes = NULL,
|
||||
*last = NULL;
|
||||
|
||||
|
||||
|
||||
void addExclude(char *pat) {
|
||||
struct exclude *n;
|
||||
|
||||
n = (struct exclude *) malloc(sizeof(struct exclude));
|
||||
n->pattern = (char *) malloc(strlen(pat)+1);
|
||||
strcpy(n->pattern, pat);
|
||||
n->next = NULL;
|
||||
|
||||
if(excludes == NULL) {
|
||||
excludes = n;
|
||||
last = excludes;
|
||||
} else {
|
||||
last->next = n;
|
||||
last = last->next;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int addExcludeFile(char *file) {
|
||||
FILE *f;
|
||||
char buf[256];
|
||||
int len;
|
||||
|
||||
if((f = fopen(file, "r")) == NULL)
|
||||
return(1);
|
||||
|
||||
while(fgets(buf, 256, f) != NULL) {
|
||||
len = strlen(buf)-1;
|
||||
while(len >=0 && (buf[len] == '\r' || buf[len] == '\n'))
|
||||
buf[len--] = '\0';
|
||||
if(len < 0)
|
||||
continue;
|
||||
addExclude(buf);
|
||||
}
|
||||
|
||||
fclose(f);
|
||||
return(0);
|
||||
}
|
||||
|
||||
|
||||
int matchExclude(char *path) {
|
||||
struct exclude *n = excludes;
|
||||
char *c;
|
||||
int matched = 0;
|
||||
|
||||
if(excludes == NULL)
|
||||
return(0);
|
||||
|
||||
do {
|
||||
matched = !fnmatch(n->pattern, path, 0);
|
||||
for(c = path; *c && !matched; c++)
|
||||
if(*c == '/' && c[1] != '/')
|
||||
matched = !fnmatch(n->pattern, c+1, 0);
|
||||
} while((n = n->next) != NULL && !matched);
|
||||
|
||||
return(matched);
|
||||
}
|
||||
|
||||
322  src/exclude.zig (new file)
@@ -0,0 +1,322 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const c = @import("c.zig").c;

// Reference:
//   https://manned.org/glob.7
//   https://manned.org/man.b4c7391e/rsync#head17
//   https://manned.org/man.401d6ade/arch/gitignore#head4
// Patterns:
//   Single component (none of these patterns match a '/'):
//     *      -> match any character sequence
//     ?      -> match single character
//     [abc]  -> match a single character in the given list
//     [a-c]  -> match a single character in the given range
//     [!a-c] -> match a single character not in the given range
//     # (these are currently still handled by calling libc fnmatch())
//   Anchored patterns:
//     /pattern
//     /dir/pattern
//     /dir/subdir/pattern
//     # In both rsync and gitignore, anchored patterns are relative to the
//     # directory under consideration. In ncdu they are instead anchored to
//     # the filesystem root (i.e. matched against the absolute path).
//   Non-anchored patterns:
//     somefile
//     subdir/foo
//     sub*/bar
//     # In .gitignore, non-anchored patterns with a slash are implicitly anchored,
//     # in rsync they can match anywhere in a path. We follow rsync here.
//   Dir patterns (trailing '/' matches only dirs):
//     /pattern/
//     somedir/
//     subdir/pattern/
//
// BREAKING CHANGE:
// ncdu < 2.2 single-component matches may cross a directory boundary, e.g.
// 'a*b' matches 'a/b'. This is an old bug; the fix breaks compatibility with
// old exclude patterns.
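// Illustrative examples of the rules above (added for clarity, not in the
// original source):
//   *.log        any entry ending in '.log', at any depth (unanchored)
//   /var/cache   exactly that absolute path (anchored to the filesystem root)
//   build/       any directory named 'build', but not a plain file of that name
//   src/*.o      a path fragment that may match anywhere, e.g. /home/x/src/a.o
//                (rsync-style, not implicitly anchored)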
const Pattern = struct {
|
||||
isdir: bool = undefined,
|
||||
isliteral: bool = undefined,
|
||||
pattern: [:0]const u8,
|
||||
sub: ?*const Pattern = undefined,
|
||||
|
||||
fn isLiteral(str: []const u8) bool {
|
||||
for (str) |chr| switch (chr) {
|
||||
'[', '*', '?', '\\' => return false,
|
||||
else => {},
|
||||
};
|
||||
return true;
|
||||
}
|
||||
|
||||
fn parse(pat_: []const u8) *const Pattern {
|
||||
var pat = std.mem.trimLeft(u8, pat_, "/");
|
||||
const top = main.allocator.create(Pattern) catch unreachable;
|
||||
var tail = top;
|
||||
tail.sub = null;
|
||||
while (std.mem.indexOfScalar(u8, pat, '/')) |idx| {
|
||||
tail.pattern = main.allocator.dupeZ(u8, pat[0..idx]) catch unreachable;
|
||||
tail.isdir = true;
|
||||
tail.isliteral = isLiteral(tail.pattern);
|
||||
pat = pat[idx+1..];
|
||||
if (std.mem.allEqual(u8, pat, '/')) return top;
|
||||
|
||||
const next = main.allocator.create(Pattern) catch unreachable;
|
||||
tail.sub = next;
|
||||
tail = next;
|
||||
tail.sub = null;
|
||||
}
|
||||
tail.pattern = main.allocator.dupeZ(u8, pat) catch unreachable;
|
||||
tail.isdir = false;
|
||||
tail.isliteral = isLiteral(tail.pattern);
|
||||
return top;
|
||||
}
|
||||
};
|
||||
|
||||
test "parse" {
|
||||
const t1 = Pattern.parse("");
|
||||
try std.testing.expectEqualStrings(t1.pattern, "");
|
||||
try std.testing.expectEqual(t1.isdir, false);
|
||||
try std.testing.expectEqual(t1.isliteral, true);
|
||||
try std.testing.expectEqual(t1.sub, null);
|
||||
|
||||
const t2 = Pattern.parse("//a//");
|
||||
try std.testing.expectEqualStrings(t2.pattern, "a");
|
||||
try std.testing.expectEqual(t2.isdir, true);
|
||||
try std.testing.expectEqual(t2.isliteral, true);
|
||||
try std.testing.expectEqual(t2.sub, null);
|
||||
|
||||
const t3 = Pattern.parse("foo*/bar.zig");
|
||||
try std.testing.expectEqualStrings(t3.pattern, "foo*");
|
||||
try std.testing.expectEqual(t3.isdir, true);
|
||||
try std.testing.expectEqual(t3.isliteral, false);
|
||||
try std.testing.expectEqualStrings(t3.sub.?.pattern, "bar.zig");
|
||||
try std.testing.expectEqual(t3.sub.?.isdir, false);
|
||||
try std.testing.expectEqual(t3.sub.?.isliteral, true);
|
||||
try std.testing.expectEqual(t3.sub.?.sub, null);
|
||||
|
||||
const t4 = Pattern.parse("/?/sub/dir/");
|
||||
try std.testing.expectEqualStrings(t4.pattern, "?");
|
||||
try std.testing.expectEqual(t4.isdir, true);
|
||||
try std.testing.expectEqual(t4.isliteral, false);
|
||||
try std.testing.expectEqualStrings(t4.sub.?.pattern, "sub");
|
||||
try std.testing.expectEqual(t4.sub.?.isdir, true);
|
||||
try std.testing.expectEqual(t4.sub.?.isliteral, true);
|
||||
try std.testing.expectEqualStrings(t4.sub.?.sub.?.pattern, "dir");
|
||||
try std.testing.expectEqual(t4.sub.?.sub.?.isdir, true);
|
||||
try std.testing.expectEqual(t4.sub.?.sub.?.isliteral, true);
|
||||
try std.testing.expectEqual(t4.sub.?.sub.?.sub, null);
|
||||
}
|
||||
|
||||
|
||||
// List of patterns to be matched at one particular level.
|
||||
// There are 2 different types of lists: those where all patterns have a
|
||||
// sub-pointer (where the pattern only matches directories at this level, and
|
||||
// the match result is only used to construct the PatternList of the
|
||||
// subdirectory) and patterns without a sub-pointer (where the match result
|
||||
// determines whether the file/dir at this level should be included or not).
|
||||
fn PatternList(comptime withsub: bool) type {
|
||||
return struct {
|
||||
literals: std.HashMapUnmanaged(*const Pattern, Val, Ctx, 80) = .{},
|
||||
wild: std.ArrayListUnmanaged(*const Pattern) = .empty,
|
||||
|
||||
// Not a fan of the map-of-arrays approach in the 'withsub' case, it
|
||||
// has a lot of extra allocations. Linking the Patterns together in a
|
||||
// list would be nicer, but that involves mutable Patterns, which in
|
||||
// turn prevents multithreaded scanning. An alternative would be a
|
||||
// sorted array + binary search, but that slows down lookups. Perhaps a
|
||||
// custom hashmap with support for duplicate keys?
|
||||
const Val = if (withsub) std.ArrayListUnmanaged(*const Pattern) else void;
|
||||
|
||||
const Ctx = struct {
|
||||
pub fn hash(_: Ctx, p: *const Pattern) u64 {
|
||||
return std.hash.Wyhash.hash(0, p.pattern);
|
||||
}
|
||||
pub fn eql(_: Ctx, a: *const Pattern, b: *const Pattern) bool {
|
||||
return std.mem.eql(u8, a.pattern, b.pattern);
|
||||
}
|
||||
};
|
||||
|
||||
const Self = @This();
|
||||
|
||||
fn append(self: *Self, pat: *const Pattern) void {
|
||||
std.debug.assert((pat.sub != null) == withsub);
|
||||
if (pat.isliteral) {
|
||||
const e = self.literals.getOrPut(main.allocator, pat) catch unreachable;
|
||||
if (!e.found_existing) {
|
||||
e.key_ptr.* = pat;
|
||||
e.value_ptr.* = if (withsub) .{} else {};
|
||||
}
|
||||
if (!withsub and !pat.isdir and e.key_ptr.*.isdir) e.key_ptr.* = pat;
|
||||
if (withsub) {
|
||||
if (pat.sub) |s| e.value_ptr.*.append(main.allocator, s) catch unreachable;
|
||||
}
|
||||
|
||||
} else self.wild.append(main.allocator, pat) catch unreachable;
|
||||
}
|
||||
|
||||
fn match(self: *const Self, name: [:0]const u8) ?bool {
|
||||
var ret: ?bool = null;
|
||||
if (self.literals.getKey(&.{ .pattern = name })) |p| ret = p.isdir;
|
||||
for (self.wild.items) |p| {
|
||||
if (ret == false) return ret;
|
||||
if (c.fnmatch(p.pattern.ptr, name.ptr, 0) == 0) ret = p.isdir;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
fn enter(self: *const Self, out: *Patterns, name: [:0]const u8) void {
|
||||
if (self.literals.get(&.{ .pattern = name })) |lst| for (lst.items) |sub| out.append(sub);
|
||||
for (self.wild.items) |p| if (c.fnmatch(p.pattern.ptr, name.ptr, 0) == 0) out.append(p.sub.?);
|
||||
}
|
||||
|
||||
fn deinit(self: *Self) void {
|
||||
if (withsub) {
|
||||
var it = self.literals.valueIterator();
|
||||
while (it.next()) |e| e.deinit(main.allocator);
|
||||
}
|
||||
self.literals.deinit(main.allocator);
|
||||
self.wild.deinit(main.allocator);
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// List of all patterns that should be matched at one level.
|
||||
pub const Patterns = struct {
|
||||
nonsub: PatternList(false) = .{},
|
||||
sub: PatternList(true) = .{},
|
||||
isroot: bool = false,
|
||||
|
||||
fn append(self: *Patterns, pat: *const Pattern) void {
|
||||
if (pat.sub == null) self.nonsub.append(pat)
|
||||
else self.sub.append(pat);
|
||||
}
|
||||
|
||||
// Matches patterns in this level plus unanchored patterns.
|
||||
// Returns null if nothing matches, otherwise whether the given item should
|
||||
// only be excluded if it's a directory.
|
||||
// (Should not be called on root_unanchored)
|
||||
pub fn match(self: *const Patterns, name: [:0]const u8) ?bool {
|
||||
const a = self.nonsub.match(name);
|
||||
if (a == false) return false;
|
||||
const b = root_unanchored.nonsub.match(name);
|
||||
if (b == false) return false;
|
||||
return a orelse b;
|
||||
}
|
||||
|
||||
// Construct the list of patterns for a subdirectory.
|
||||
pub fn enter(self: *const Patterns, name: [:0]const u8) Patterns {
|
||||
var ret = Patterns{};
|
||||
self.sub.enter(&ret, name);
|
||||
root_unanchored.sub.enter(&ret, name);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Patterns) void {
|
||||
// getPatterns() result should be deinit()ed, except when it returns the root;
// to keep that simple, we just never deinit the root.
|
||||
if (self.isroot) return;
|
||||
self.nonsub.deinit();
|
||||
self.sub.deinit();
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
|
||||
// Unanchored patterns that should be checked at every level
|
||||
var root_unanchored: Patterns = .{};
|
||||
|
||||
// Patterns anchored at the root
|
||||
var root: Patterns = .{ .isroot = true };
|
||||
|
||||
pub fn addPattern(pattern: []const u8) void {
|
||||
if (pattern.len == 0) return;
|
||||
const p = Pattern.parse(pattern);
|
||||
if (pattern[0] == '/') root.append(p)
|
||||
else root_unanchored.append(p);
|
||||
}
|
||||
|
||||
// Get the patterns for the given (absolute) path, assuming the given path
|
||||
// itself hasn't been excluded. This function is slow, directory walking code
|
||||
// should use Patterns.enter() instead.
|
||||
pub fn getPatterns(path_: []const u8) Patterns {
|
||||
var path = std.mem.trim(u8, path_, "/");
|
||||
if (path.len == 0) return root;
|
||||
var pat = root;
|
||||
defer pat.deinit();
|
||||
while (std.mem.indexOfScalar(u8, path, '/')) |idx| {
|
||||
const name = main.allocator.dupeZ(u8, path[0..idx]) catch unreachable;
|
||||
defer main.allocator.free(name);
|
||||
path = path[idx+1..];
|
||||
|
||||
const sub = pat.enter(name);
|
||||
pat.deinit();
|
||||
pat = sub;
|
||||
}
|
||||
|
||||
const name = main.allocator.dupeZ(u8, path) catch unreachable;
|
||||
defer main.allocator.free(name);
|
||||
return pat.enter(name);
|
||||
}
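// Sketch of how the API above fits together during a scan (illustrative only,
// not part of this commit; "exclude" is the module name assumed here):
//
//     var pats = exclude.getPatterns("/scan/root");   // once, for the scan root
//     if (pats.match(entry_name)) |dironly| {
//         // excluded, unless the match is dir-only and this entry is a file
//     }
//     var sub = pats.enter(entry_name);   // when descending into a directory
//     defer sub.deinit();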
|
||||
|
||||
|
||||
fn testfoo(p: *const Patterns) !void {
|
||||
try std.testing.expectEqual(p.match("root"), null);
|
||||
try std.testing.expectEqual(p.match("bar"), false);
|
||||
try std.testing.expectEqual(p.match("qoo"), false);
|
||||
try std.testing.expectEqual(p.match("xyz"), false);
|
||||
try std.testing.expectEqual(p.match("okay"), null);
|
||||
try std.testing.expectEqual(p.match("somefile"), false);
|
||||
var s = p.enter("okay");
|
||||
try std.testing.expectEqual(s.match("bar"), null);
|
||||
try std.testing.expectEqual(s.match("xyz"), null);
|
||||
try std.testing.expectEqual(s.match("notokay"), false);
|
||||
s.deinit();
|
||||
}
|
||||
|
||||
test "Matching" {
|
||||
addPattern("/foo/bar");
|
||||
addPattern("/foo/qoo/");
|
||||
addPattern("/foo/qoo");
|
||||
addPattern("/foo/qoo/");
|
||||
addPattern("/f??/xyz");
|
||||
addPattern("/f??/xyz/");
|
||||
addPattern("/*o/somefile");
|
||||
addPattern("/a??/okay");
|
||||
addPattern("/roo?");
|
||||
addPattern("/root/");
|
||||
addPattern("excluded");
|
||||
addPattern("somefile/");
|
||||
addPattern("o*y/not[o]kay");
|
||||
|
||||
var a0 = getPatterns("/");
|
||||
try std.testing.expectEqual(a0.match("a"), null);
|
||||
try std.testing.expectEqual(a0.match("excluded"), false);
|
||||
try std.testing.expectEqual(a0.match("somefile"), true);
|
||||
try std.testing.expectEqual(a0.match("root"), false);
|
||||
var a1 = a0.enter("foo");
|
||||
a0.deinit();
|
||||
try testfoo(&a1);
|
||||
a1.deinit();
|
||||
|
||||
var b0 = getPatterns("/somedir/somewhere");
|
||||
try std.testing.expectEqual(b0.match("a"), null);
|
||||
try std.testing.expectEqual(b0.match("excluded"), false);
|
||||
try std.testing.expectEqual(b0.match("root"), null);
|
||||
try std.testing.expectEqual(b0.match("okay"), null);
|
||||
var b1 = b0.enter("okay");
|
||||
b0.deinit();
|
||||
try std.testing.expectEqual(b1.match("excluded"), false);
|
||||
try std.testing.expectEqual(b1.match("okay"), null);
|
||||
try std.testing.expectEqual(b1.match("notokay"), false);
|
||||
b1.deinit();
|
||||
|
||||
var c0 = getPatterns("/foo/");
|
||||
try testfoo(&c0);
|
||||
c0.deinit();
|
||||
}
|
||||
193  src/help.c
@@ -1,193 +0,0 @@
/* ncdu - NCurses Disk Usage
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
#include "ncdu.h"
|
||||
|
||||
|
||||
void drawHelp(int page) {
|
||||
WINDOW *hlp;
|
||||
|
||||
hlp = newwin(15, 60, winrows/2-7, wincols/2-30);
|
||||
box(hlp, 0, 0);
|
||||
wattron(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 0, 4, "ncdu help");
|
||||
wattroff(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 13, 32, "Press any key to continue");
|
||||
|
||||
switch(page) {
|
||||
case 1:
|
||||
wattron(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 1, 30, "1:Keys");
|
||||
wattroff(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 1, 39, "2:Format");
|
||||
mvwaddstr(hlp, 1, 50, "3:About");
|
||||
wattron(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 3, 7, "up/down");
|
||||
mvwaddstr(hlp, 4, 3, "right/enter");
|
||||
mvwaddstr(hlp, 5, 10, "left");
|
||||
mvwaddstr(hlp, 6, 11, "n/s");
|
||||
mvwaddch( hlp, 7, 13, 'd');
|
||||
mvwaddch( hlp, 8, 13, 't');
|
||||
mvwaddch( hlp, 9, 13, 'g');
|
||||
mvwaddch( hlp,10, 13, 'p');
|
||||
mvwaddch( hlp,11, 13, 'h');
|
||||
mvwaddch( hlp,12, 13, 'q');
|
||||
wattroff(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 3, 16, "Cycle through the items");
|
||||
mvwaddstr(hlp, 4, 16, "Open directory");
|
||||
mvwaddstr(hlp, 5, 16, "Previous directory");
|
||||
mvwaddstr(hlp, 6, 16, "Sort by name or size (asc/desc)");
|
||||
mvwaddstr(hlp, 7, 16, "Delete selected file or directory");
|
||||
mvwaddstr(hlp, 8, 16, "Toggle dirs before files when sorting");
|
||||
mvwaddstr(hlp, 9, 16, "Show percentage and/or graph");
|
||||
mvwaddstr(hlp,10, 16, "Toggle between powers of 1000 and 1024");
|
||||
mvwaddstr(hlp,11, 16, "Show/hide hidden or excluded files");
|
||||
mvwaddstr(hlp,12, 16, "Quit ncdu");
|
||||
break;
|
||||
case 2:
|
||||
mvwaddstr(hlp, 1, 30, "1:Keys");
|
||||
wattron(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 1, 39, "2:Format");
|
||||
wattroff(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 1, 50, "3:About");
|
||||
wattron(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 3, 3, "X [size] [file or directory]");
|
||||
wattroff(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 5, 4, "The X is only present in the following cases:");
|
||||
wattron(hlp, A_BOLD);
|
||||
mvwaddch(hlp, 6, 4, '!');
|
||||
mvwaddch(hlp, 7, 4, '.');
|
||||
mvwaddch(hlp, 8, 4, '>');
|
||||
mvwaddch(hlp, 9, 4, '<');
|
||||
mvwaddch(hlp,10, 4, '@');
|
||||
mvwaddch(hlp,11, 4, 'e');
|
||||
wattroff(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 6, 7, "An error occurred while reading this directory");
mvwaddstr(hlp, 7, 7, "An error occurred while reading a subdirectory");
mvwaddstr(hlp, 8, 7, "File or directory is excluded from the statistics");
mvwaddstr(hlp, 9, 7, "Directory was on another filesystem");
|
||||
mvwaddstr(hlp,10, 7, "This is not a file nor a dir (symlink, socket, ...)");
|
||||
mvwaddstr(hlp,11, 7, "Empty directory");
|
||||
break;
|
||||
case 3:
|
||||
/* Indeed, too much spare time */
|
||||
mvwaddstr(hlp, 1, 30, "1:Keys");
|
||||
mvwaddstr(hlp, 1, 39, "2:Format");
|
||||
wattron(hlp, A_BOLD);
|
||||
mvwaddstr(hlp, 1, 50, "3:About");
|
||||
wattroff(hlp, A_BOLD);
|
||||
wattron(hlp, A_REVERSE);
|
||||
#define x 12
|
||||
#define y 4
|
||||
/* N */
|
||||
mvwaddstr(hlp, y+0, x+0, " ");
|
||||
mvwaddstr(hlp, y+1, x+0, " ");
|
||||
mvwaddstr(hlp, y+2, x+0, " ");
|
||||
mvwaddstr(hlp, y+3, x+0, " ");
|
||||
mvwaddstr(hlp, y+4, x+0, " ");
|
||||
mvwaddstr(hlp, y+1, x+4, " ");
|
||||
mvwaddstr(hlp, y+2, x+4, " ");
|
||||
mvwaddstr(hlp, y+3, x+4, " ");
|
||||
mvwaddstr(hlp, y+4, x+4, " ");
|
||||
/* C */
|
||||
mvwaddstr(hlp, y+0, x+8, " ");
|
||||
mvwaddstr(hlp, y+1, x+8, " ");
|
||||
mvwaddstr(hlp, y+2, x+8, " ");
|
||||
mvwaddstr(hlp, y+3, x+8, " ");
|
||||
mvwaddstr(hlp, y+4, x+8, " ");
|
||||
/* D */
|
||||
mvwaddstr(hlp, y+0, x+19, " ");
|
||||
mvwaddstr(hlp, y+1, x+19, " ");
|
||||
mvwaddstr(hlp, y+2, x+15, " ");
|
||||
mvwaddstr(hlp, y+3, x+15, " ");
|
||||
mvwaddstr(hlp, y+3, x+19, " ");
|
||||
mvwaddstr(hlp, y+4, x+15, " ");
|
||||
/* U */
|
||||
mvwaddstr(hlp, y+0, x+23, " ");
|
||||
mvwaddstr(hlp, y+1, x+23, " ");
|
||||
mvwaddstr(hlp, y+2, x+23, " ");
|
||||
mvwaddstr(hlp, y+3, x+23, " ");
|
||||
mvwaddstr(hlp, y+0, x+27, " ");
|
||||
mvwaddstr(hlp, y+1, x+27, " ");
|
||||
mvwaddstr(hlp, y+2, x+27, " ");
|
||||
mvwaddstr(hlp, y+3, x+27, " ");
|
||||
mvwaddstr(hlp, y+4, x+23, " ");
|
||||
wattroff(hlp, A_REVERSE);
|
||||
mvwaddstr(hlp, y+0, x+30, "NCurses");
|
||||
mvwaddstr(hlp, y+1, x+30, "Disk");
|
||||
mvwaddstr(hlp, y+2, x+30, "Usage");
|
||||
mvwprintw(hlp, y+4, x+30, "%s", PACKAGE_VERSION);
|
||||
mvwaddstr(hlp,10, 7, "Written by Yoran Heling <projects@yorhel.nl>");
|
||||
mvwaddstr(hlp,11, 16, "http://dev.yorhel.nl/ncdu/");
|
||||
break;
|
||||
case 4:
|
||||
mvwaddstr(hlp, 1, 30, "1:Keys");
|
||||
mvwaddstr(hlp, 1, 39, "2:Format");
|
||||
mvwaddstr(hlp, 1, 50, "3:About");
|
||||
mvwaddstr(hlp, 3, 3, "There is no fourth window, baka~~");
|
||||
}
|
||||
wrefresh(hlp);
|
||||
delwin(hlp); /* no need to use it anymore - free it */
|
||||
}
|
||||
|
||||
|
||||
void showHelp(void) {
|
||||
int p = 1, ch;
|
||||
|
||||
drawHelp(p);
|
||||
while((ch = getch())) {
|
||||
switch(ch) {
|
||||
case '1':
|
||||
p = 1;
|
||||
break;
|
||||
case '2':
|
||||
p = 2;
|
||||
break;
|
||||
case '3':
|
||||
p = 3;
|
||||
break;
|
||||
case '4':
|
||||
p = 4;
|
||||
break;
|
||||
case KEY_RIGHT:
|
||||
if(++p > 4)
|
||||
p = 4;
|
||||
break;
|
||||
case KEY_LEFT:
|
||||
if(--p < 1)
|
||||
p = 1;
|
||||
break;
|
||||
case KEY_RESIZE:
|
||||
ncresize();
|
||||
drawBrowser(0);
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
drawHelp(p);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
270  src/json_export.zig (new file)
@@ -0,0 +1,270 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const model = @import("model.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const util = @import("util.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
// JSON output is necessarily single-threaded and items MUST be added depth-first.
|
||||
|
||||
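// Rough shape of the resulting export (illustrative and abbreviated, not part
// of this commit):
//   [1,2,{"progname":"ncdu","progver":"...","timestamp":1700000000},
//    [{"name":"/scanned/dir"},
//     {"name":"file.txt","asize":123,"dsize":4096},
//     [{"name":"subdir"},
//      {"name":"nested.dat","asize":1,"dsize":4096}]]]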
pub const global = struct {
|
||||
var writer: *Writer = undefined;
|
||||
};
|
||||
|
||||
|
||||
const ZstdWriter = struct {
|
||||
ctx: ?*c.ZSTD_CStream,
|
||||
out: c.ZSTD_outBuffer,
|
||||
outbuf: [c.ZSTD_BLOCKSIZE_MAX + 64]u8,
|
||||
|
||||
fn create() *ZstdWriter {
|
||||
const w = main.allocator.create(ZstdWriter) catch unreachable;
|
||||
w.out = .{
|
||||
.dst = &w.outbuf,
|
||||
.size = w.outbuf.len,
|
||||
.pos = 0,
|
||||
};
|
||||
while (true) {
|
||||
w.ctx = c.ZSTD_createCStream();
|
||||
if (w.ctx != null) break;
|
||||
ui.oom();
|
||||
}
|
||||
_ = c.ZSTD_CCtx_setParameter(w.ctx, c.ZSTD_c_compressionLevel, main.config.complevel);
|
||||
return w;
|
||||
}
|
||||
|
||||
fn destroy(w: *ZstdWriter) void {
|
||||
_ = c.ZSTD_freeCStream(w.ctx);
|
||||
main.allocator.destroy(w);
|
||||
}
|
||||
|
||||
fn write(w: *ZstdWriter, f: std.fs.File, in: []const u8, flush: bool) !void {
|
||||
var arg = c.ZSTD_inBuffer{
|
||||
.src = in.ptr,
|
||||
.size = in.len,
|
||||
.pos = 0,
|
||||
};
|
||||
while (true) {
|
||||
const v = c.ZSTD_compressStream2(w.ctx, &w.out, &arg, if (flush) c.ZSTD_e_end else c.ZSTD_e_continue);
|
||||
if (c.ZSTD_isError(v) != 0) return error.ZstdCompressError;
|
||||
if (flush or w.out.pos > w.outbuf.len / 2) {
|
||||
try f.writeAll(w.outbuf[0..w.out.pos]);
|
||||
w.out.pos = 0;
|
||||
}
|
||||
if (!flush and arg.pos == arg.size) break;
|
||||
if (flush and v == 0) break;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Writer = struct {
|
||||
fd: std.fs.File,
|
||||
zstd: ?*ZstdWriter = null,
|
||||
// Must be large enough to hold PATH_MAX*6 plus some overhead.
|
||||
// (The 6 is because, in the worst case, every byte expands to a "\u####"
|
||||
// escape, and we do pessimistic estimates here in order to avoid checking
|
||||
// buffer lengths for each and every write operation)
|
||||
buf: [64*1024]u8 = undefined,
|
||||
off: usize = 0,
|
||||
dir_entry_open: bool = false,
|
||||
|
||||
fn flush(ctx: *Writer, bytes: usize) void {
|
||||
@branchHint(.unlikely);
|
||||
// This can only really happen when the root path exceeds PATH_MAX,
|
||||
// in which case we would probably have error'ed out earlier anyway.
|
||||
if (bytes > ctx.buf.len) ui.die("Error writing JSON export: path too long.\n", .{});
|
||||
const buf = ctx.buf[0..ctx.off];
|
||||
(if (ctx.zstd) |z| z.write(ctx.fd, buf, bytes == 0) else ctx.fd.writeAll(buf)) catch |e|
|
||||
ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
|
||||
ctx.off = 0;
|
||||
}
|
||||
|
||||
fn ensureSpace(ctx: *Writer, bytes: usize) void {
|
||||
if (bytes > ctx.buf.len - ctx.off) ctx.flush(bytes);
|
||||
}
|
||||
|
||||
fn write(ctx: *Writer, s: []const u8) void {
|
||||
@memcpy(ctx.buf[ctx.off..][0..s.len], s);
|
||||
ctx.off += s.len;
|
||||
}
|
||||
|
||||
fn writeByte(ctx: *Writer, b: u8) void {
|
||||
ctx.buf[ctx.off] = b;
|
||||
ctx.off += 1;
|
||||
}
|
||||
|
||||
// Write escaped string contents, excluding the quotes.
|
||||
fn writeStr(ctx: *Writer, s: []const u8) void {
|
||||
for (s) |b| {
|
||||
if (b >= 0x20 and b != '"' and b != '\\' and b != 127) ctx.writeByte(b)
|
||||
else switch (b) {
|
||||
'\n' => ctx.write("\\n"),
|
||||
'\r' => ctx.write("\\r"),
|
||||
0x8 => ctx.write("\\b"),
|
||||
'\t' => ctx.write("\\t"),
|
||||
0xC => ctx.write("\\f"),
|
||||
'\\' => ctx.write("\\\\"),
|
||||
'"' => ctx.write("\\\""),
|
||||
else => {
|
||||
ctx.write("\\u00");
|
||||
const hexdig = "0123456789abcdef";
|
||||
ctx.writeByte(hexdig[b>>4]);
|
||||
ctx.writeByte(hexdig[b&0xf]);
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn writeUint(ctx: *Writer, n: u64) void {
|
||||
// Based on std.fmt.formatInt
|
||||
var a = n;
|
||||
var buf: [24]u8 = undefined;
|
||||
var index: usize = buf.len;
|
||||
while (a >= 100) : (a = @divTrunc(a, 100)) {
|
||||
index -= 2;
|
||||
buf[index..][0..2].* = std.fmt.digits2(@as(u8, @intCast(a % 100)));
|
||||
}
|
||||
if (a < 10) {
|
||||
index -= 1;
|
||||
buf[index] = '0' + @as(u8, @intCast(a));
|
||||
} else {
|
||||
index -= 2;
|
||||
buf[index..][0..2].* = std.fmt.digits2(@as(u8, @intCast(a)));
|
||||
}
|
||||
ctx.write(buf[index..]);
|
||||
}
|
||||
|
||||
fn init(out: std.fs.File) *Writer {
|
||||
var ctx = main.allocator.create(Writer) catch unreachable;
|
||||
ctx.* = .{ .fd = out };
|
||||
if (main.config.compress) ctx.zstd = ZstdWriter.create();
|
||||
ctx.write("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":");
|
||||
ctx.writeUint(@intCast(@max(0, std.time.timestamp())));
|
||||
ctx.writeByte('}');
|
||||
return ctx;
|
||||
}
|
||||
|
||||
// A newly written directory entry is left "open", i.e. the '}' to close
|
||||
// the item object is not written, to allow for a setReadError() to be
|
||||
// caught if one happens before the first sub entry.
|
||||
// Any read errors after the first sub entry are thrown away, but that's
|
||||
// just a limitation of the JSON format.
|
||||
fn closeDirEntry(ctx: *Writer, rderr: bool) void {
|
||||
if (ctx.dir_entry_open) {
|
||||
ctx.dir_entry_open = false;
|
||||
if (rderr) ctx.write(",\"read_error\":true");
|
||||
ctx.writeByte('}');
|
||||
}
|
||||
}
|
||||
|
||||
fn writeSpecial(ctx: *Writer, name: []const u8, t: model.EType) void {
|
||||
ctx.closeDirEntry(false);
|
||||
ctx.ensureSpace(name.len*6 + 1000);
|
||||
ctx.write(if (t.isDirectory()) ",\n[{\"name\":\"" else ",\n{\"name\":\"");
|
||||
ctx.writeStr(name);
|
||||
ctx.write(switch (t) {
|
||||
.err => "\",\"read_error\":true}",
|
||||
.otherfs => "\",\"excluded\":\"otherfs\"}",
|
||||
.kernfs => "\",\"excluded\":\"kernfs\"}",
|
||||
.pattern => "\",\"excluded\":\"pattern\"}",
|
||||
else => unreachable,
|
||||
});
|
||||
if (t.isDirectory()) ctx.writeByte(']');
|
||||
}
|
||||
|
||||
fn writeStat(ctx: *Writer, name: []const u8, stat: *const sink.Stat, parent_dev: u64) void {
|
||||
ctx.ensureSpace(name.len*6 + 1000);
|
||||
ctx.write(if (stat.etype == .dir) ",\n[{\"name\":\"" else ",\n{\"name\":\"");
|
||||
ctx.writeStr(name);
|
||||
ctx.writeByte('"');
|
||||
if (stat.size > 0) {
|
||||
ctx.write(",\"asize\":");
|
||||
ctx.writeUint(stat.size);
|
||||
}
|
||||
if (stat.blocks > 0) {
|
||||
ctx.write(",\"dsize\":");
|
||||
ctx.writeUint(util.blocksToSize(stat.blocks));
|
||||
}
|
||||
if (stat.etype == .dir and stat.dev != parent_dev) {
|
||||
ctx.write(",\"dev\":");
|
||||
ctx.writeUint(stat.dev);
|
||||
}
|
||||
if (stat.etype == .link) {
|
||||
ctx.write(",\"ino\":");
|
||||
ctx.writeUint(stat.ino);
|
||||
ctx.write(",\"hlnkc\":true,\"nlink\":");
|
||||
ctx.writeUint(stat.nlink);
|
||||
}
|
||||
if (stat.etype == .nonreg) ctx.write(",\"notreg\":true");
|
||||
if (main.config.extended) {
|
||||
if (stat.ext.pack.hasuid) {
|
||||
ctx.write(",\"uid\":");
|
||||
ctx.writeUint(stat.ext.uid);
|
||||
}
|
||||
if (stat.ext.pack.hasgid) {
|
||||
ctx.write(",\"gid\":");
|
||||
ctx.writeUint(stat.ext.gid);
|
||||
}
|
||||
if (stat.ext.pack.hasmode) {
|
||||
ctx.write(",\"mode\":");
|
||||
ctx.writeUint(stat.ext.mode);
|
||||
}
|
||||
if (stat.ext.pack.hasmtime) {
|
||||
ctx.write(",\"mtime\":");
|
||||
ctx.writeUint(stat.ext.mtime);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Dir = struct {
|
||||
dev: u64,
|
||||
|
||||
pub fn addSpecial(_: *Dir, name: []const u8, sp: model.EType) void {
|
||||
global.writer.writeSpecial(name, sp);
|
||||
}
|
||||
|
||||
pub fn addStat(_: *Dir, name: []const u8, stat: *const sink.Stat) void {
|
||||
global.writer.closeDirEntry(false);
|
||||
global.writer.writeStat(name, stat, undefined);
|
||||
global.writer.writeByte('}');
|
||||
}
|
||||
|
||||
pub fn addDir(d: *Dir, name: []const u8, stat: *const sink.Stat) Dir {
|
||||
global.writer.closeDirEntry(false);
|
||||
global.writer.writeStat(name, stat, d.dev);
|
||||
global.writer.dir_entry_open = true;
|
||||
return .{ .dev = stat.dev };
|
||||
}
|
||||
|
||||
pub fn setReadError(_: *Dir) void {
|
||||
global.writer.closeDirEntry(true);
|
||||
}
|
||||
|
||||
pub fn final(_: *Dir) void {
|
||||
global.writer.ensureSpace(1000);
|
||||
global.writer.closeDirEntry(false);
|
||||
global.writer.writeByte(']');
|
||||
}
|
||||
};
|
||||
|
||||
pub fn createRoot(path: []const u8, stat: *const sink.Stat) Dir {
|
||||
var root = Dir{.dev=0};
|
||||
return root.addDir(path, stat);
|
||||
}
|
||||
|
||||
pub fn done() void {
|
||||
global.writer.write("]\n");
|
||||
global.writer.flush(0);
|
||||
if (global.writer.zstd) |z| z.destroy();
|
||||
global.writer.fd.close();
|
||||
main.allocator.destroy(global.writer);
|
||||
}
|
||||
|
||||
pub fn setupOutput(out: std.fs.File) void {
|
||||
global.writer = Writer.init(out);
|
||||
}
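// Rough shape of the resulting export (illustrative names and sizes; the real
// output only separates entries with ",\n"):
//   [1,2,{"progname":"ncdu","progver":"2.9.2","timestamp":1700000000},
//    [{"name":"/scanned/dir","dsize":4096,"dev":2049},
//     {"name":"file.txt","asize":123,"dsize":4096},
//     [{"name":"subdir"},
//      {"name":"nested.txt","asize":1,"dsize":4096}]]]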
|
||||
src/json_import.zig (new file, 562 lines)
@@ -0,0 +1,562 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const util = @import("util.zig");
|
||||
const model = @import("model.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
|
||||
const ZstdReader = struct {
|
||||
ctx: ?*c.ZSTD_DStream,
|
||||
in: c.ZSTD_inBuffer,
|
||||
lastret: usize = 0,
|
||||
inbuf: [c.ZSTD_BLOCKSIZE_MAX + 16]u8, // This is ZSTD_DStreamInSize() + a little bit extra
|
||||
|
||||
fn create(head: []const u8) *ZstdReader {
|
||||
const r = main.allocator.create(ZstdReader) catch unreachable;
|
||||
@memcpy(r.inbuf[0..head.len], head);
|
||||
r.in = .{
|
||||
.src = &r.inbuf,
|
||||
.size = head.len,
|
||||
.pos = 0,
|
||||
};
|
||||
while (true) {
|
||||
r.ctx = c.ZSTD_createDStream();
|
||||
if (r.ctx != null) break;
|
||||
ui.oom();
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
fn destroy(r: *ZstdReader) void {
|
||||
_ = c.ZSTD_freeDStream(r.ctx);
|
||||
main.allocator.destroy(r);
|
||||
}
|
||||
|
||||
fn read(r: *ZstdReader, f: std.fs.File, out: []u8) !usize {
|
||||
while (true) {
|
||||
if (r.in.size == r.in.pos) {
|
||||
r.in.pos = 0;
|
||||
r.in.size = try f.read(&r.inbuf);
|
||||
if (r.in.size == 0) {
|
||||
if (r.lastret == 0) return 0;
|
||||
return error.ZstdDecompressError; // Early EOF
|
||||
}
|
||||
}
|
||||
|
||||
var arg = c.ZSTD_outBuffer{ .dst = out.ptr, .size = out.len, .pos = 0 };
|
||||
r.lastret = c.ZSTD_decompressStream(r.ctx, &arg, &r.in);
|
||||
if (c.ZSTD_isError(r.lastret) != 0) return error.ZstdDecompressError;
|
||||
if (arg.pos > 0) return arg.pos;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// Using a custom JSON parser here because, while std.json is great, it does
|
||||
// perform strict UTF-8 validation. Which is correct, of course, but ncdu dumps
|
||||
// are not always correct JSON as they may contain non-UTF-8 paths encoded as
|
||||
// strings.
|
||||
|
||||
const Parser = struct {
|
||||
rd: std.fs.File,
|
||||
zstd: ?*ZstdReader = null,
|
||||
rdoff: usize = 0,
|
||||
rdsize: usize = 0,
|
||||
byte: u64 = 1,
|
||||
line: u64 = 1,
|
||||
buf: [129*1024]u8 = undefined,
|
||||
|
||||
fn die(p: *Parser, str: []const u8) noreturn {
|
||||
ui.die("Error importing file on line {}:{}: {s}.\n", .{ p.line, p.byte, str });
|
||||
}
|
||||
|
||||
// Feed back a byte that has just been returned by nextByte()
|
||||
fn undoNextByte(p: *Parser, b: u8) void {
|
||||
p.byte -= 1;
|
||||
p.rdoff -= 1;
|
||||
p.buf[p.rdoff] = b;
|
||||
}
|
||||
|
||||
fn fill(p: *Parser) void {
|
||||
p.rdoff = 0;
|
||||
p.rdsize = (if (p.zstd) |z| z.read(p.rd, &p.buf) else p.rd.read(&p.buf)) catch |e| switch (e) {
|
||||
error.IsDir => p.die("not a file"), // should be detected at open() time, but no flag for that...
|
||||
error.SystemResources => p.die("out of memory"),
|
||||
error.ZstdDecompressError => p.die("decompression error"),
|
||||
else => p.die("I/O error"),
|
||||
};
|
||||
}
|
||||
|
||||
// Returns 0 on EOF.
|
||||
// (or if the file contains a 0 byte, but that's invalid anyway)
|
||||
// (Returning a '?u8' here is nicer but kills performance by about +30%)
|
||||
fn nextByte(p: *Parser) u8 {
|
||||
if (p.rdoff == p.rdsize) {
|
||||
@branchHint(.unlikely);
|
||||
p.fill();
|
||||
if (p.rdsize == 0) return 0;
|
||||
}
|
||||
p.byte += 1;
|
||||
defer p.rdoff += 1;
|
||||
return (&p.buf)[p.rdoff];
|
||||
}
|
||||
|
||||
// next non-whitespace byte
|
||||
fn nextChr(p: *Parser) u8 {
|
||||
while (true) switch (p.nextByte()) {
|
||||
'\n' => {
|
||||
p.line += 1;
|
||||
p.byte = 1;
|
||||
},
|
||||
' ', '\t', '\r' => {},
|
||||
else => |b| return b,
|
||||
};
|
||||
}
|
||||
|
||||
fn expectLit(p: *Parser, lit: []const u8) void {
|
||||
for (lit) |b| if (b != p.nextByte()) p.die("invalid JSON");
|
||||
}
|
||||
|
||||
fn hexdig(p: *Parser) u16 {
|
||||
const b = p.nextByte();
|
||||
return switch (b) {
|
||||
'0'...'9' => b - '0',
|
||||
'a'...'f' => b - 'a' + 10,
|
||||
'A'...'F' => b - 'A' + 10,
|
||||
else => p.die("invalid hex digit"),
|
||||
};
|
||||
}
|
||||
|
||||
fn stringContentSlow(p: *Parser, buf: []u8, head: u8, off: usize) []u8 {
|
||||
@branchHint(.unlikely);
|
||||
var b = head;
|
||||
var n = off;
|
||||
while (true) {
|
||||
switch (b) {
|
||||
'"' => break,
|
||||
'\\' => switch (p.nextByte()) {
|
||||
'"' => if (n < buf.len) { buf[n] = '"'; n += 1; },
|
||||
'\\'=> if (n < buf.len) { buf[n] = '\\';n += 1; },
|
||||
'/' => if (n < buf.len) { buf[n] = '/'; n += 1; },
|
||||
'b' => if (n < buf.len) { buf[n] = 0x8; n += 1; },
|
||||
'f' => if (n < buf.len) { buf[n] = 0xc; n += 1; },
|
||||
'n' => if (n < buf.len) { buf[n] = 0xa; n += 1; },
|
||||
'r' => if (n < buf.len) { buf[n] = 0xd; n += 1; },
|
||||
't' => if (n < buf.len) { buf[n] = 0x9; n += 1; },
|
||||
'u' => {
|
||||
const first = (p.hexdig()<<12) + (p.hexdig()<<8) + (p.hexdig()<<4) + p.hexdig();
|
||||
var unit = @as(u21, first);
|
||||
if (std.unicode.utf16IsLowSurrogate(first)) p.die("Unexpected low surrogate");
|
||||
if (std.unicode.utf16IsHighSurrogate(first)) {
|
||||
p.expectLit("\\u");
|
||||
const second = (p.hexdig()<<12) + (p.hexdig()<<8) + (p.hexdig()<<4) + p.hexdig();
|
||||
unit = std.unicode.utf16DecodeSurrogatePair(&.{first, second}) catch p.die("Invalid low surrogate");
|
||||
}
|
||||
if (n + 6 < buf.len)
|
||||
n += std.unicode.utf8Encode(unit, buf[n..n+5]) catch unreachable;
|
||||
},
|
||||
else => p.die("invalid escape sequence"),
|
||||
},
|
||||
0x20, 0x21, 0x23...0x5b, 0x5d...0xff => if (n < buf.len) { buf[n] = b; n += 1; },
|
||||
else => p.die("invalid character in string"),
|
||||
}
|
||||
b = p.nextByte();
|
||||
}
|
||||
return buf[0..n];
|
||||
}
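// Example (illustrative): the escaped pair "\ud83d\ude00" decodes via
// utf16DecodeSurrogatePair to U+1F600 and is re-encoded as the four UTF-8
// bytes f0 9f 98 80; a lone low surrogate such as "\udc00" dies with
// "Unexpected low surrogate".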
|
||||
|
||||
// Read a string (after the ") into buf.
|
||||
// Any characters beyond the size of the buffer are consumed but otherwise discarded.
|
||||
fn stringContent(p: *Parser, buf: []u8) []u8 {
|
||||
// The common case (for ncdu dumps): string fits in the given buffer and does not contain any escapes.
|
||||
var n: usize = 0;
|
||||
var b = p.nextByte();
|
||||
while (n < buf.len and b >= 0x20 and b != '"' and b != '\\') {
|
||||
buf[n] = b;
|
||||
n += 1;
|
||||
b = p.nextByte();
|
||||
}
|
||||
if (b == '"') return buf[0..n];
|
||||
return p.stringContentSlow(buf, b, n);
|
||||
}
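// Example: for a typical dump string such as "src" the fast loop above copies
// 's', 'r', 'c' and returns on the closing quote; stringContentSlow() is only
// entered when the string contains an escape or control byte, or overflows buf.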
|
||||
|
||||
fn string(p: *Parser, buf: []u8) []u8 {
|
||||
if (p.nextChr() != '"') p.die("expected string");
|
||||
return p.stringContent(buf);
|
||||
}
|
||||
|
||||
fn uintTail(p: *Parser, head: u8, T: anytype) T {
|
||||
if (head == '0') return 0;
|
||||
var v: T = head - '0'; // Assumption: T >= u8
|
||||
// Assumption: we don't parse JSON "documents" that are a bare uint.
|
||||
while (true) switch (p.nextByte()) {
|
||||
'0'...'9' => |b| {
|
||||
const newv = v *% 10 +% (b - '0');
|
||||
if (newv < v) p.die("integer out of range");
|
||||
v = newv;
|
||||
},
|
||||
else => |b| break p.undoNextByte(b),
|
||||
};
|
||||
if (v == 0) p.die("expected number");
|
||||
return v;
|
||||
}
|
||||
|
||||
fn uint(p: *Parser, T: anytype) T {
|
||||
switch (p.nextChr()) {
|
||||
'0'...'9' => |b| return p.uintTail(b, T),
|
||||
else => p.die("expected number"),
|
||||
}
|
||||
}
|
||||
|
||||
fn boolean(p: *Parser) bool {
|
||||
switch (p.nextChr()) {
|
||||
't' => { p.expectLit("rue"); return true; },
|
||||
'f' => { p.expectLit("alse"); return false; },
|
||||
else => p.die("expected boolean"),
|
||||
}
|
||||
}
|
||||
|
||||
fn obj(p: *Parser) void {
|
||||
if (p.nextChr() != '{') p.die("expected object");
|
||||
}
|
||||
|
||||
fn key(p: *Parser, first: bool, buf: []u8) ?[]u8 {
|
||||
const k = switch (p.nextChr()) {
|
||||
',' => blk: {
|
||||
if (first) p.die("invalid JSON");
|
||||
break :blk p.string(buf);
|
||||
},
|
||||
'"' => blk: {
|
||||
if (!first) p.die("invalid JSON");
|
||||
break :blk p.stringContent(buf);
|
||||
},
|
||||
'}' => return null,
|
||||
else => p.die("invalid JSON"),
|
||||
};
|
||||
if (p.nextChr() != ':') p.die("invalid JSON");
|
||||
return k;
|
||||
}
|
||||
|
||||
fn array(p: *Parser) void {
|
||||
if (p.nextChr() != '[') p.die("expected array");
|
||||
}
|
||||
|
||||
fn elem(p: *Parser, first: bool) bool {
|
||||
switch (p.nextChr()) {
|
||||
',' => if (first) p.die("invalid JSON") else return true,
|
||||
']' => return false,
|
||||
else => |b| {
|
||||
if (!first) p.die("invalid JSON");
|
||||
p.undoNextByte(b);
|
||||
return true;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn skipContent(p: *Parser, head: u8) void {
|
||||
switch (head) {
|
||||
't' => p.expectLit("rue"),
|
||||
'f' => p.expectLit("alse"),
|
||||
'n' => p.expectLit("ull"),
|
||||
'-', '0'...'9' =>
|
||||
// Numbers are kind of annoying, this "parsing" is invalid and ultra-lazy.
|
||||
while (true) switch (p.nextByte()) {
|
||||
'-', '+', 'e', 'E', '.', '0'...'9' => {},
|
||||
else => |b| return p.undoNextByte(b),
|
||||
},
|
||||
'"' => _ = p.stringContent(&[0]u8{}),
|
||||
'[' => {
|
||||
var first = true;
|
||||
while (p.elem(first)) {
|
||||
first = false;
|
||||
p.skip();
|
||||
}
|
||||
},
|
||||
'{' => {
|
||||
var first = true;
|
||||
while (p.key(first, &[0]u8{})) |_| {
|
||||
first = false;
|
||||
p.skip();
|
||||
}
|
||||
},
|
||||
else => p.die("invalid JSON"),
|
||||
}
|
||||
}
|
||||
|
||||
fn skip(p: *Parser) void {
|
||||
p.skipContent(p.nextChr());
|
||||
}
|
||||
|
||||
fn eof(p: *Parser) void {
|
||||
if (p.nextChr() != 0) p.die("trailing garbage");
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// Should really add some invalid JSON test cases as well, but I'd first like
|
||||
// to benchmark the performance impact of using error returns instead of
|
||||
// calling ui.die().
|
||||
test "JSON parser" {
|
||||
const json =
|
||||
\\{
|
||||
\\ "null": null,
|
||||
\\ "true": true,
|
||||
\\ "false": false,
|
||||
\\ "zero":0 ,"uint": 123,
|
||||
\\ "emptyObj": {},
|
||||
\\ "emptyArray": [],
|
||||
\\ "emptyString": "",
|
||||
\\ "encString": "\"\\\/\b\f\n\uBe3F",
|
||||
\\ "numbers": [0,1,20,-300, 3.4 ,0e-10 , -100.023e+13 ]
|
||||
\\}
|
||||
;
|
||||
var p = Parser{ .rd = undefined, .rdsize = json.len };
|
||||
@memcpy(p.buf[0..json.len], json);
|
||||
p.skip();
|
||||
|
||||
p = Parser{ .rd = undefined, .rdsize = json.len };
|
||||
@memcpy(p.buf[0..json.len], json);
|
||||
var buf: [128]u8 = undefined;
|
||||
p.obj();
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(true, &buf).?, "null");
|
||||
p.skip();
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(false, &buf).?, "true");
|
||||
try std.testing.expect(p.boolean());
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(false, &buf).?, "false");
|
||||
try std.testing.expect(!p.boolean());
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(false, &buf).?, "zero");
|
||||
try std.testing.expectEqual(0, p.uint(u8));
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(false, &buf).?, "uint");
|
||||
try std.testing.expectEqual(123, p.uint(u8));
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(false, &buf).?, "emptyObj");
|
||||
p.obj();
|
||||
try std.testing.expect(p.key(true, &buf) == null);
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(false, &buf).?, "emptyArray");
|
||||
p.array();
|
||||
try std.testing.expect(!p.elem(true));
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(false, &buf).?, "emptyString");
|
||||
try std.testing.expectEqualStrings(p.string(&buf), "");
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(false, &buf).?, "encString");
|
||||
try std.testing.expectEqualStrings(p.string(&buf), "\"\\/\x08\x0c\n\u{be3f}");
|
||||
|
||||
try std.testing.expectEqualStrings(p.key(false, &buf).?, "numbers");
|
||||
p.skip();
|
||||
|
||||
try std.testing.expect(p.key(true, &buf) == null);
|
||||
}
|
||||
|
||||
|
||||
const Ctx = struct {
|
||||
p: *Parser,
|
||||
sink: *sink.Thread,
|
||||
stat: sink.Stat = .{},
|
||||
rderr: bool = false,
|
||||
namelen: usize = 0,
|
||||
namebuf: [32*1024]u8 = undefined,
|
||||
};
|
||||
|
||||
|
||||
fn itemkey(ctx: *Ctx, key: []const u8) void {
|
||||
const eq = std.mem.eql;
|
||||
switch (if (key.len > 0) key[0] else @as(u8,0)) {
|
||||
'a' => {
|
||||
if (eq(u8, key, "asize")) {
|
||||
ctx.stat.size = ctx.p.uint(u64);
|
||||
return;
|
||||
}
|
||||
},
|
||||
'd' => {
|
||||
if (eq(u8, key, "dsize")) {
|
||||
ctx.stat.blocks = @intCast(ctx.p.uint(u64)>>9);
|
||||
return;
|
||||
}
|
||||
if (eq(u8, key, "dev")) {
|
||||
ctx.stat.dev = ctx.p.uint(u64);
|
||||
return;
|
||||
}
|
||||
},
|
||||
'e' => {
|
||||
if (eq(u8, key, "excluded")) {
|
||||
var buf: [32]u8 = undefined;
|
||||
const typ = ctx.p.string(&buf);
|
||||
// "frmlnk" is also possible, but currently considered equivalent to "pattern".
|
||||
ctx.stat.etype =
|
||||
if (eq(u8, typ, "otherfs") or eq(u8, typ, "othfs")) .otherfs
|
||||
else if (eq(u8, typ, "kernfs")) .kernfs
|
||||
else .pattern;
|
||||
return;
|
||||
}
|
||||
},
|
||||
'g' => {
|
||||
if (eq(u8, key, "gid")) {
|
||||
ctx.stat.ext.gid = ctx.p.uint(u32);
|
||||
ctx.stat.ext.pack.hasgid = true;
|
||||
return;
|
||||
}
|
||||
},
|
||||
'h' => {
|
||||
if (eq(u8, key, "hlnkc")) {
|
||||
if (ctx.p.boolean()) ctx.stat.etype = .link;
|
||||
return;
|
||||
}
|
||||
},
|
||||
'i' => {
|
||||
if (eq(u8, key, "ino")) {
|
||||
ctx.stat.ino = ctx.p.uint(u64);
|
||||
return;
|
||||
}
|
||||
},
|
||||
'm' => {
|
||||
if (eq(u8, key, "mode")) {
|
||||
ctx.stat.ext.mode = ctx.p.uint(u16);
|
||||
ctx.stat.ext.pack.hasmode = true;
|
||||
return;
|
||||
}
|
||||
if (eq(u8, key, "mtime")) {
|
||||
ctx.stat.ext.mtime = ctx.p.uint(u64);
|
||||
ctx.stat.ext.pack.hasmtime = true;
|
||||
// Accept decimal numbers, but discard the fractional part because our data model doesn't support it.
|
||||
switch (ctx.p.nextByte()) {
|
||||
'.' =>
|
||||
while (true) switch (ctx.p.nextByte()) {
|
||||
'0'...'9' => {},
|
||||
else => |b| return ctx.p.undoNextByte(b),
|
||||
},
|
||||
else => |b| return ctx.p.undoNextByte(b),
|
||||
}
|
||||
}
|
||||
},
|
||||
'n' => {
|
||||
if (eq(u8, key, "name")) {
|
||||
if (ctx.namelen != 0) ctx.p.die("duplicate key");
|
||||
ctx.namelen = ctx.p.string(&ctx.namebuf).len;
|
||||
if (ctx.namelen > ctx.namebuf.len-5) ctx.p.die("too long file name");
|
||||
return;
|
||||
}
|
||||
if (eq(u8, key, "nlink")) {
|
||||
ctx.stat.nlink = ctx.p.uint(u31);
|
||||
if (ctx.stat.etype != .dir and ctx.stat.nlink > 1)
|
||||
ctx.stat.etype = .link;
|
||||
return;
|
||||
}
|
||||
if (eq(u8, key, "notreg")) {
|
||||
if (ctx.p.boolean()) ctx.stat.etype = .nonreg;
|
||||
return;
|
||||
}
|
||||
},
|
||||
'r' => {
|
||||
if (eq(u8, key, "read_error")) {
|
||||
if (ctx.p.boolean()) {
|
||||
if (ctx.stat.etype == .dir) ctx.rderr = true
|
||||
else ctx.stat.etype = .err;
|
||||
}
|
||||
return;
|
||||
}
|
||||
},
|
||||
'u' => {
|
||||
if (eq(u8, key, "uid")) {
|
||||
ctx.stat.ext.uid = ctx.p.uint(u32);
|
||||
ctx.stat.ext.pack.hasuid = true;
|
||||
return;
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
ctx.p.skip();
|
||||
}
|
||||
|
||||
|
||||
fn item(ctx: *Ctx, parent: ?*sink.Dir, dev: u64) void {
|
||||
ctx.stat = .{ .dev = dev };
|
||||
ctx.namelen = 0;
|
||||
ctx.rderr = false;
|
||||
const isdir = switch (ctx.p.nextChr()) {
|
||||
'[' => blk: {
|
||||
ctx.p.obj();
|
||||
break :blk true;
|
||||
},
|
||||
'{' => false,
|
||||
else => ctx.p.die("expected object or array"),
|
||||
};
|
||||
if (parent == null and !isdir) ctx.p.die("parent item must be a directory");
|
||||
ctx.stat.etype = if (isdir) .dir else .reg;
|
||||
|
||||
var keybuf: [32]u8 = undefined;
|
||||
var first = true;
|
||||
while (ctx.p.key(first, &keybuf)) |k| {
|
||||
first = false;
|
||||
itemkey(ctx, k);
|
||||
}
|
||||
if (ctx.namelen == 0) ctx.p.die("missing \"name\" field");
|
||||
const name = (&ctx.namebuf)[0..ctx.namelen];
|
||||
|
||||
if (ctx.stat.etype == .dir) {
|
||||
const ndev = ctx.stat.dev;
|
||||
const dir =
|
||||
if (parent) |d| d.addDir(ctx.sink, name, &ctx.stat)
|
||||
else sink.createRoot(name, &ctx.stat);
|
||||
ctx.sink.setDir(dir);
|
||||
if (ctx.rderr) dir.setReadError(ctx.sink);
|
||||
while (ctx.p.elem(false)) item(ctx, dir, ndev);
|
||||
ctx.sink.setDir(parent);
|
||||
dir.unref(ctx.sink);
|
||||
|
||||
} else {
|
||||
if (@intFromEnum(ctx.stat.etype) < 0)
|
||||
parent.?.addSpecial(ctx.sink, name, ctx.stat.etype)
|
||||
else
|
||||
parent.?.addStat(ctx.sink, name, &ctx.stat);
|
||||
if (isdir and ctx.p.elem(false)) ctx.p.die("unexpected contents in an excluded directory");
|
||||
}
|
||||
|
||||
if ((ctx.sink.files_seen.load(.monotonic) & 65) == 0)
|
||||
main.handleEvent(false, false);
|
||||
}
|
||||
|
||||
|
||||
pub fn import(fd: std.fs.File, head: []const u8) void {
|
||||
const sink_threads = sink.createThreads(1);
|
||||
defer sink.done();
|
||||
|
||||
var p = Parser{.rd = fd};
|
||||
defer if (p.zstd) |z| z.destroy();
|
||||
|
||||
if (head.len >= 4 and std.mem.eql(u8, head[0..4], "\x28\xb5\x2f\xfd")) {
|
||||
p.zstd = ZstdReader.create(head);
|
||||
} else {
|
||||
p.rdsize = head.len;
|
||||
@memcpy(p.buf[0..head.len], head);
|
||||
}
|
||||
p.array();
|
||||
if (p.uint(u16) != 1) p.die("incompatible major format version");
|
||||
if (!p.elem(false)) p.die("expected array element");
|
||||
_ = p.uint(u16); // minor version, ignored for now
|
||||
if (!p.elem(false)) p.die("expected array element");
|
||||
|
||||
// metadata object
|
||||
p.obj();
|
||||
p.skipContent('{');
|
||||
|
||||
// Items
|
||||
if (!p.elem(false)) p.die("expected array element");
|
||||
var ctx = Ctx{.p = &p, .sink = &sink_threads[0]};
|
||||
item(&ctx, null, 0);
|
||||
|
||||
// accept more trailing elements
|
||||
while (p.elem(false)) p.skip();
|
||||
p.eof();
|
||||
}
|
||||
src/main.c (deleted, 59 lines)
@@ -1,59 +0,0 @@
|
|||
/* ncdu - NCurses Disk Usage
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
#include "ncdu.h"
|
||||
|
||||
/* check ncdu.h what these are for */
|
||||
struct dir dat;
|
||||
int winrows, wincols;
|
||||
char sdir[PATH_MAX];
|
||||
int sflags, bflags, sdelay, bgraph;
|
||||
|
||||
/* main program */
|
||||
int main(int argc, char **argv) {
|
||||
int r, gd;
|
||||
gd = settingsCli(argc, argv);
|
||||
initscr();
|
||||
cbreak();
|
||||
noecho();
|
||||
curs_set(0);
|
||||
keypad(stdscr, TRUE);
|
||||
ncresize();
|
||||
|
||||
if(gd && settingsWin()) goto mainend;
|
||||
while((r = calcUsage()) != 0) {
|
||||
if(r == 1 && settingsWin()) goto mainend;
|
||||
else if(r == 2) goto mainend;
|
||||
}
|
||||
showBrowser();
|
||||
|
||||
mainend:
|
||||
erase();
|
||||
refresh();
|
||||
endwin();
|
||||
|
||||
return(0);
|
||||
}
|
||||
|
||||
src/main.zig (new file, 693 lines)
@@ -0,0 +1,693 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pub const program_version = "2.9.2";
|
||||
|
||||
const std = @import("std");
|
||||
const model = @import("model.zig");
|
||||
const scan = @import("scan.zig");
|
||||
const json_import = @import("json_import.zig");
|
||||
const json_export = @import("json_export.zig");
|
||||
const bin_export = @import("bin_export.zig");
|
||||
const bin_reader = @import("bin_reader.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const mem_src = @import("mem_src.zig");
|
||||
const mem_sink = @import("mem_sink.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const browser = @import("browser.zig");
|
||||
const delete = @import("delete.zig");
|
||||
const util = @import("util.zig");
|
||||
const exclude = @import("exclude.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
test "imports" {
|
||||
_ = model;
|
||||
_ = scan;
|
||||
_ = json_import;
|
||||
_ = json_export;
|
||||
_ = bin_export;
|
||||
_ = bin_reader;
|
||||
_ = sink;
|
||||
_ = mem_src;
|
||||
_ = mem_sink;
|
||||
_ = ui;
|
||||
_ = browser;
|
||||
_ = delete;
|
||||
_ = util;
|
||||
_ = exclude;
|
||||
}
|
||||
|
||||
// "Custom" allocator that wraps the libc allocator and calls ui.oom() on error.
|
||||
// This allocator never returns an error, it either succeeds or causes ncdu to quit.
|
||||
// (Which means you'll find a lot of "catch unreachable" sprinkled through the code;
|
||||
// they look scarier than they are)
|
||||
fn wrapAlloc(_: *anyopaque, len: usize, ptr_alignment: std.mem.Alignment, return_address: usize) ?[*]u8 {
|
||||
while (true) {
|
||||
if (std.heap.c_allocator.vtable.alloc(undefined, len, ptr_alignment, return_address)) |r|
|
||||
return r
|
||||
else {}
|
||||
ui.oom();
|
||||
}
|
||||
}
|
||||
|
||||
pub const allocator = std.mem.Allocator{
|
||||
.ptr = undefined,
|
||||
.vtable = &.{
|
||||
.alloc = wrapAlloc,
|
||||
// AFAIK, all uses of resize() to grow an allocation will fall back to alloc() on failure.
|
||||
.resize = std.heap.c_allocator.vtable.resize,
|
||||
.remap = std.heap.c_allocator.vtable.remap,
|
||||
.free = std.heap.c_allocator.vtable.free,
|
||||
},
|
||||
};
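// Typical call site (illustrative): since wrapAlloc() never returns null, the
// OutOfMemory error from the std.mem.Allocator wrappers can never actually
// occur, hence the ubiquitous pattern:
//   const copy = allocator.dupeZ(u8, "example") catch unreachable;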
|
||||
|
||||
|
||||
// Custom panic impl to reset the terminal before spewing out an error message.
|
||||
pub const panic = std.debug.FullPanic(struct {
|
||||
pub fn panicFn(msg: []const u8, first_trace_addr: ?usize) noreturn {
|
||||
@branchHint(.cold);
|
||||
ui.deinit();
|
||||
std.debug.defaultPanic(msg, first_trace_addr);
|
||||
}
|
||||
}.panicFn);
|
||||
|
||||
pub const config = struct {
|
||||
pub const SortCol = enum { name, blocks, size, items, mtime };
|
||||
pub const SortOrder = enum { asc, desc };
|
||||
|
||||
pub var same_fs: bool = false;
|
||||
pub var extended: bool = false;
|
||||
pub var follow_symlinks: bool = false;
|
||||
pub var exclude_caches: bool = false;
|
||||
pub var exclude_kernfs: bool = false;
|
||||
pub var threads: usize = 1;
|
||||
pub var complevel: u8 = 4;
|
||||
pub var compress: bool = false;
|
||||
pub var export_block_size: ?usize = null;
|
||||
|
||||
pub var update_delay: u64 = 100*std.time.ns_per_ms;
|
||||
pub var scan_ui: ?enum { none, line, full } = null;
|
||||
pub var si: bool = false;
|
||||
pub var nc_tty: bool = false;
|
||||
pub var ui_color: enum { off, dark, darkbg } = .off;
|
||||
pub var thousands_sep: []const u8 = ",";
|
||||
|
||||
pub var show_hidden: bool = true;
|
||||
pub var show_blocks: bool = true;
|
||||
pub var show_shared: enum { off, shared, unique } = .shared;
|
||||
pub var show_items: bool = false;
|
||||
pub var show_mtime: bool = false;
|
||||
pub var show_graph: bool = true;
|
||||
pub var show_percent: bool = false;
|
||||
pub var graph_style: enum { hash, half, eighth } = .hash;
|
||||
pub var sort_col: SortCol = .blocks;
|
||||
pub var sort_order: SortOrder = .desc;
|
||||
pub var sort_dirsfirst: bool = false;
|
||||
pub var sort_natural: bool = true;
|
||||
|
||||
pub var imported: bool = false;
|
||||
pub var binreader: bool = false;
|
||||
pub var can_delete: ?bool = null;
|
||||
pub var can_shell: ?bool = null;
|
||||
pub var can_refresh: ?bool = null;
|
||||
pub var confirm_quit: bool = false;
|
||||
pub var confirm_delete: bool = true;
|
||||
pub var ignore_delete_errors: bool = false;
|
||||
pub var delete_command: [:0]const u8 = "";
|
||||
};
|
||||
|
||||
pub var state: enum { scan, browse, refresh, shell, delete } = .scan;
|
||||
|
||||
const stdin = if (@hasDecl(std.io, "getStdIn")) std.io.getStdIn() else std.fs.File.stdin();
|
||||
const stdout = if (@hasDecl(std.io, "getStdOut")) std.io.getStdOut() else std.fs.File.stdout();
|
||||
|
||||
// Simple generic argument parser, supports getopt_long() style arguments.
|
||||
const Args = struct {
|
||||
lst: []const [:0]const u8,
|
||||
short: ?[:0]const u8 = null, // Remainder after a short option, e.g. -x<stuff> (which may be either more short options or an argument)
|
||||
last: ?[]const u8 = null,
|
||||
last_arg: ?[:0]const u8 = null, // In the case of --option=<arg>
|
||||
shortbuf: [2]u8 = undefined,
|
||||
argsep: bool = false,
|
||||
ignerror: bool = false,
|
||||
|
||||
const Self = @This();
|
||||
const Option = struct {
|
||||
opt: bool,
|
||||
val: []const u8,
|
||||
|
||||
fn is(self: @This(), cmp: []const u8) bool {
|
||||
return self.opt and std.mem.eql(u8, self.val, cmp);
|
||||
}
|
||||
};
|
||||
|
||||
fn init(lst: []const [:0]const u8) Self {
|
||||
return Self{ .lst = lst };
|
||||
}
|
||||
|
||||
fn pop(self: *Self) ?[:0]const u8 {
|
||||
if (self.lst.len == 0) return null;
|
||||
defer self.lst = self.lst[1..];
|
||||
return self.lst[0];
|
||||
}
|
||||
|
||||
fn shortopt(self: *Self, s: [:0]const u8) Option {
|
||||
self.shortbuf[0] = '-';
|
||||
self.shortbuf[1] = s[0];
|
||||
self.short = if (s.len > 1) s[1.. :0] else null;
|
||||
self.last = &self.shortbuf;
|
||||
return .{ .opt = true, .val = &self.shortbuf };
|
||||
}
|
||||
|
||||
pub fn die(self: *const Self, comptime msg: []const u8, args: anytype) !noreturn {
|
||||
if (self.ignerror) return error.InvalidArg;
|
||||
ui.die(msg, args);
|
||||
}
|
||||
|
||||
/// Return the next option or positional argument.
|
||||
/// 'opt' indicates whether it's an option or positional argument,
|
||||
/// 'val' will be either -x, --something or the argument.
|
||||
pub fn next(self: *Self) !?Option {
|
||||
if (self.last_arg != null) try self.die("Option '{s}' does not expect an argument.\n", .{ self.last.? });
|
||||
if (self.short) |s| return self.shortopt(s);
|
||||
const val = self.pop() orelse return null;
|
||||
if (self.argsep or val.len == 0 or val[0] != '-') return Option{ .opt = false, .val = val };
|
||||
if (val.len == 1) try self.die("Invalid option '-'.\n", .{});
|
||||
if (val.len == 2 and val[1] == '-') {
|
||||
self.argsep = true;
|
||||
return self.next();
|
||||
}
|
||||
if (val[1] == '-') {
|
||||
if (std.mem.indexOfScalar(u8, val, '=')) |sep| {
|
||||
if (sep == 2) try self.die("Invalid option '{s}'.\n", .{val});
|
||||
self.last_arg = val[sep+1.. :0];
|
||||
self.last = val[0..sep];
|
||||
return Option{ .opt = true, .val = self.last.? };
|
||||
}
|
||||
self.last = val;
|
||||
return Option{ .opt = true, .val = val };
|
||||
}
|
||||
return self.shortopt(val[1..:0]);
|
||||
}
|
||||
|
||||
/// Returns the argument given to the last returned option. Dies with an error if no argument is provided.
|
||||
pub fn arg(self: *Self) ![:0]const u8 {
|
||||
if (self.short) |a| {
|
||||
defer self.short = null;
|
||||
return a;
|
||||
}
|
||||
if (self.last_arg) |a| {
|
||||
defer self.last_arg = null;
|
||||
return a;
|
||||
}
|
||||
if (self.pop()) |o| return o;
|
||||
try self.die("Option '{s}' requires an argument.\n", .{ self.last.? });
|
||||
}
|
||||
};
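// Parsing conventions handled above (illustrative): "-t8" and "--threads=8"
// both produce the option "-t"/"--threads" with arg() == "8"; "-rr" yields
// "-r" twice; a bare "--" makes every following argument positional.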
|
||||
|
||||
fn argConfig(args: *Args, opt: Args.Option, infile: bool) !void {
|
||||
if (opt.is("-q") or opt.is("--slow-ui-updates")) config.update_delay = 2*std.time.ns_per_s
|
||||
else if (opt.is("--fast-ui-updates")) config.update_delay = 100*std.time.ns_per_ms
|
||||
else if (opt.is("-x") or opt.is("--one-file-system")) config.same_fs = true
|
||||
else if (opt.is("--cross-file-system")) config.same_fs = false
|
||||
else if (opt.is("-e") or opt.is("--extended")) config.extended = true
|
||||
else if (opt.is("--no-extended")) config.extended = false
|
||||
else if (opt.is("-r") and !(config.can_delete orelse true)) config.can_shell = false
|
||||
else if (opt.is("-r")) config.can_delete = false
|
||||
else if (opt.is("--enable-shell")) config.can_shell = true
|
||||
else if (opt.is("--disable-shell")) config.can_shell = false
|
||||
else if (opt.is("--enable-delete")) config.can_delete = true
|
||||
else if (opt.is("--disable-delete")) config.can_delete = false
|
||||
else if (opt.is("--enable-refresh")) config.can_refresh = true
|
||||
else if (opt.is("--disable-refresh")) config.can_refresh = false
|
||||
else if (opt.is("--show-hidden")) config.show_hidden = true
|
||||
else if (opt.is("--hide-hidden")) config.show_hidden = false
|
||||
else if (opt.is("--show-itemcount")) config.show_items = true
|
||||
else if (opt.is("--hide-itemcount")) config.show_items = false
|
||||
else if (opt.is("--show-mtime")) config.show_mtime = true
|
||||
else if (opt.is("--hide-mtime")) config.show_mtime = false
|
||||
else if (opt.is("--show-graph")) config.show_graph = true
|
||||
else if (opt.is("--hide-graph")) config.show_graph = false
|
||||
else if (opt.is("--show-percent")) config.show_percent = true
|
||||
else if (opt.is("--hide-percent")) config.show_percent = false
|
||||
else if (opt.is("--group-directories-first")) config.sort_dirsfirst = true
|
||||
else if (opt.is("--no-group-directories-first")) config.sort_dirsfirst = false
|
||||
else if (opt.is("--enable-natsort")) config.sort_natural = true
|
||||
else if (opt.is("--disable-natsort")) config.sort_natural = false
|
||||
else if (opt.is("--graph-style")) {
|
||||
const val = try args.arg();
|
||||
if (std.mem.eql(u8, val, "hash")) config.graph_style = .hash
|
||||
else if (std.mem.eql(u8, val, "half-block")) config.graph_style = .half
|
||||
else if (std.mem.eql(u8, val, "eighth-block") or std.mem.eql(u8, val, "eigth-block")) config.graph_style = .eighth
|
||||
else try args.die("Unknown --graph-style option: {s}.\n", .{val});
|
||||
} else if (opt.is("--sort")) {
|
||||
var val: []const u8 = try args.arg();
|
||||
var ord: ?config.SortOrder = null;
|
||||
if (std.mem.endsWith(u8, val, "-asc")) {
|
||||
val = val[0..val.len-4];
|
||||
ord = .asc;
|
||||
} else if (std.mem.endsWith(u8, val, "-desc")) {
|
||||
val = val[0..val.len-5];
|
||||
ord = .desc;
|
||||
}
|
||||
if (std.mem.eql(u8, val, "name")) {
|
||||
config.sort_col = .name;
|
||||
config.sort_order = ord orelse .asc;
|
||||
} else if (std.mem.eql(u8, val, "disk-usage")) {
|
||||
config.sort_col = .blocks;
|
||||
config.sort_order = ord orelse .desc;
|
||||
} else if (std.mem.eql(u8, val, "apparent-size")) {
|
||||
config.sort_col = .size;
|
||||
config.sort_order = ord orelse .desc;
|
||||
} else if (std.mem.eql(u8, val, "itemcount")) {
|
||||
config.sort_col = .items;
|
||||
config.sort_order = ord orelse .desc;
|
||||
} else if (std.mem.eql(u8, val, "mtime")) {
|
||||
config.sort_col = .mtime;
|
||||
config.sort_order = ord orelse .asc;
|
||||
} else try args.die("Unknown --sort option: {s}.\n", .{val});
|
||||
} else if (opt.is("--shared-column")) {
|
||||
const val = try args.arg();
|
||||
if (std.mem.eql(u8, val, "off")) config.show_shared = .off
|
||||
else if (std.mem.eql(u8, val, "shared")) config.show_shared = .shared
|
||||
else if (std.mem.eql(u8, val, "unique")) config.show_shared = .unique
|
||||
else try args.die("Unknown --shared-column option: {s}.\n", .{val});
|
||||
} else if (opt.is("--apparent-size")) config.show_blocks = false
|
||||
else if (opt.is("--disk-usage")) config.show_blocks = true
|
||||
else if (opt.is("-0")) config.scan_ui = .none
|
||||
else if (opt.is("-1")) config.scan_ui = .line
|
||||
else if (opt.is("-2")) config.scan_ui = .full
|
||||
else if (opt.is("--si")) config.si = true
|
||||
else if (opt.is("--no-si")) config.si = false
|
||||
else if (opt.is("-L") or opt.is("--follow-symlinks")) config.follow_symlinks = true
|
||||
else if (opt.is("--no-follow-symlinks")) config.follow_symlinks = false
|
||||
else if (opt.is("--exclude")) {
|
||||
const arg = if (infile) (util.expanduser(try args.arg(), allocator) catch unreachable) else try args.arg();
|
||||
defer if (infile) allocator.free(arg);
|
||||
exclude.addPattern(arg);
|
||||
} else if (opt.is("-X") or opt.is("--exclude-from")) {
|
||||
const arg = if (infile) (util.expanduser(try args.arg(), allocator) catch unreachable) else try args.arg();
|
||||
defer if (infile) allocator.free(arg);
|
||||
readExcludeFile(arg) catch |e| try args.die("Error reading excludes from {s}: {s}.\n", .{ arg, ui.errorString(e) });
|
||||
} else if (opt.is("--exclude-caches")) config.exclude_caches = true
|
||||
else if (opt.is("--include-caches")) config.exclude_caches = false
|
||||
else if (opt.is("--exclude-kernfs")) config.exclude_kernfs = true
|
||||
else if (opt.is("--include-kernfs")) config.exclude_kernfs = false
|
||||
else if (opt.is("-c") or opt.is("--compress")) config.compress = true
|
||||
else if (opt.is("--no-compress")) config.compress = false
|
||||
else if (opt.is("--compress-level")) {
|
||||
const val = try args.arg();
|
||||
const num = std.fmt.parseInt(u8, val, 10) catch try args.die("Invalid number for --compress-level: {s}.\n", .{val});
|
||||
if (num <= 0 or num > 20) try args.die("Invalid number for --compress-level: {s}.\n", .{val});
|
||||
config.complevel = num;
|
||||
} else if (opt.is("--export-block-size")) {
|
||||
const val = try args.arg();
|
||||
const num = std.fmt.parseInt(u14, val, 10) catch try args.die("Invalid number for --export-block-size: {s}.\n", .{val});
|
||||
if (num < 4 or num > 16000) try args.die("Invalid number for --export-block-size: {s}.\n", .{val});
|
||||
config.export_block_size = @as(usize, num) * 1024;
|
||||
} else if (opt.is("--confirm-quit")) config.confirm_quit = true
|
||||
else if (opt.is("--no-confirm-quit")) config.confirm_quit = false
|
||||
else if (opt.is("--confirm-delete")) config.confirm_delete = true
|
||||
else if (opt.is("--no-confirm-delete")) config.confirm_delete = false
|
||||
else if (opt.is("--delete-command")) config.delete_command = allocator.dupeZ(u8, try args.arg()) catch unreachable
|
||||
else if (opt.is("--color")) {
|
||||
const val = try args.arg();
|
||||
if (std.mem.eql(u8, val, "off")) config.ui_color = .off
|
||||
else if (std.mem.eql(u8, val, "dark")) config.ui_color = .dark
|
||||
else if (std.mem.eql(u8, val, "dark-bg")) config.ui_color = .darkbg
|
||||
else try args.die("Unknown --color option: {s}.\n", .{val});
|
||||
} else if (opt.is("-t") or opt.is("--threads")) {
|
||||
const val = try args.arg();
|
||||
config.threads = std.fmt.parseInt(u8, val, 10) catch try args.die("Invalid number of --threads: {s}.\n", .{val});
|
||||
} else return error.UnknownOption;
|
||||
}
|
||||
|
||||
fn tryReadArgsFile(path: [:0]const u8) void {
|
||||
var f = std.fs.cwd().openFileZ(path, .{}) catch |e| switch (e) {
|
||||
error.FileNotFound => return,
|
||||
error.NotDir => return,
|
||||
else => ui.die("Error opening {s}: {s}\nRun with --ignore-config to skip reading config files.\n", .{ path, ui.errorString(e) }),
|
||||
};
|
||||
defer f.close();
|
||||
|
||||
var line_buf: [4096]u8 = undefined;
|
||||
var line_rd = util.LineReader.init(f, &line_buf);
|
||||
|
||||
while (true) {
|
||||
const line_ = (line_rd.read() catch |e|
|
||||
ui.die("Error reading from {s}: {s}\nRun with --ignore-config to skip reading config files.\n", .{ path, ui.errorString(e) })
|
||||
) orelse break;
|
||||
|
||||
var argc: usize = 0;
|
||||
var ignerror = false;
|
||||
var arglist: [2][:0]const u8 = .{ "", "" };
|
||||
|
||||
var line = std.mem.trim(u8, line_, &std.ascii.whitespace);
|
||||
if (line.len > 0 and line[0] == '@') {
|
||||
ignerror = true;
|
||||
line = line[1..];
|
||||
}
|
||||
if (line.len == 0 or line[0] == '#') continue;
|
||||
if (std.mem.indexOfAny(u8, line, " \t=")) |i| {
|
||||
arglist[argc] = allocator.dupeZ(u8, line[0..i]) catch unreachable;
|
||||
argc += 1;
|
||||
line = std.mem.trimLeft(u8, line[i+1..], &std.ascii.whitespace);
|
||||
}
|
||||
arglist[argc] = allocator.dupeZ(u8, line) catch unreachable;
|
||||
argc += 1;
|
||||
|
||||
var args = Args.init(arglist[0..argc]);
|
||||
args.ignerror = ignerror;
|
||||
while (args.next() catch null) |opt| {
|
||||
if (argConfig(&args, opt, true)) |_| {}
|
||||
else |_| {
|
||||
if (ignerror) break;
|
||||
ui.die("Unrecognized option in config file '{s}': {s}.\nRun with --ignore-config to skip reading config files.\n", .{path, opt.val});
|
||||
}
|
||||
}
|
||||
allocator.free(arglist[0]);
|
||||
if (argc == 2) allocator.free(arglist[1]);
|
||||
}
|
||||
}
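// Example config file accepted by the loop above (illustrative contents):
//   # comments and empty lines are skipped
//   --exclude .git
//   --color=dark
//   @--some-unknown-option    (the '@' prefix suppresses errors for that line)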
|
||||
|
||||
fn version() noreturn {
|
||||
stdout.writeAll("ncdu " ++ program_version ++ "\n") catch {};
|
||||
std.process.exit(0);
|
||||
}
|
||||
|
||||
fn help() noreturn {
|
||||
stdout.writeAll(
|
||||
\\ncdu <options> <directory>
|
||||
\\
|
||||
\\Mode selection:
|
||||
\\ -h, --help This help message
|
||||
\\ -v, -V, --version Print version
|
||||
\\ -f FILE Import scanned directory from FILE
|
||||
\\ -o FILE Export scanned directory to FILE in JSON format
|
||||
\\ -O FILE Export scanned directory to FILE in binary format
|
||||
\\ -e, --extended Enable extended information
|
||||
\\ --ignore-config Don't load config files
|
||||
\\
|
||||
\\Scan options:
|
||||
\\ -x, --one-file-system Stay on the same filesystem
|
||||
\\ --exclude PATTERN Exclude files that match PATTERN
|
||||
\\ -X, --exclude-from FILE Exclude files that match any pattern in FILE
|
||||
\\ --exclude-caches Exclude directories containing CACHEDIR.TAG
|
||||
\\ -L, --follow-symlinks Follow symbolic links (excluding directories)
|
||||
\\ --exclude-kernfs Exclude Linux pseudo filesystems (procfs,sysfs,cgroup,...)
|
||||
\\ -t NUM Scan with NUM threads
|
||||
\\
|
||||
\\Export options:
|
||||
\\ -c, --compress Use Zstandard compression with `-o`
|
||||
\\ --compress-level NUM Set compression level
|
||||
\\ --export-block-size KIB Set export block size with `-O`
|
||||
\\
|
||||
\\Interface options:
|
||||
\\ -0, -1, -2 UI to use when scanning (0=none,2=full ncurses)
|
||||
\\ -q, --slow-ui-updates "Quiet" mode, refresh interval 2 seconds
|
||||
\\ --enable-shell Enable/disable shell spawning feature
|
||||
\\ --enable-delete Enable/disable file deletion feature
|
||||
\\ --enable-refresh Enable/disable directory refresh feature
|
||||
\\ -r Read only (--disable-delete)
|
||||
\\ -rr Read only++ (--disable-delete & --disable-shell)
|
||||
\\ --si Use base 10 (SI) prefixes instead of base 2
|
||||
\\ --apparent-size Show apparent size instead of disk usage by default
|
||||
\\ --hide-hidden Hide "hidden" or excluded files by default
|
||||
\\ --show-itemcount Show item count column by default
|
||||
\\ --show-mtime Show mtime column by default (requires `-e`)
|
||||
\\ --show-graph Show graph column by default
|
||||
\\ --show-percent Show percent column by default
|
||||
\\ --graph-style STYLE hash / half-block / eighth-block
|
||||
\\ --shared-column off / shared / unique
|
||||
\\ --sort COLUMN-(asc/desc) disk-usage / name / apparent-size / itemcount / mtime
|
||||
\\ --enable-natsort Use natural order when sorting by name
|
||||
\\ --group-directories-first Sort directories before files
|
||||
\\ --confirm-quit Ask confirmation before quitting ncdu
|
||||
\\ --no-confirm-delete Don't ask confirmation before deletion
|
||||
\\ --delete-command CMD Command to run for file deletion
|
||||
\\ --color SCHEME off / dark / dark-bg
|
||||
\\
|
||||
\\Refer to `man ncdu` for more information.
|
||||
\\
|
||||
) catch {};
|
||||
std.process.exit(0);
|
||||
}
|
||||
|
||||
|
||||
fn readExcludeFile(path: [:0]const u8) !void {
|
||||
const f = try std.fs.cwd().openFileZ(path, .{});
|
||||
defer f.close();
|
||||
|
||||
var line_buf: [4096]u8 = undefined;
|
||||
var line_rd = util.LineReader.init(f, &line_buf);
|
||||
while (try line_rd.read()) |line| {
|
||||
if (line.len > 0)
|
||||
exclude.addPattern(line);
|
||||
}
|
||||
}
|
||||
|
||||
fn readImport(path: [:0]const u8) !void {
|
||||
const fd =
|
||||
if (std.mem.eql(u8, "-", path)) stdin
|
||||
else try std.fs.cwd().openFileZ(path, .{});
|
||||
errdefer fd.close();
|
||||
|
||||
var buf: [8]u8 = undefined;
|
||||
if (8 != try fd.readAll(&buf)) return error.EndOfStream;
|
||||
if (std.mem.eql(u8, &buf, bin_export.SIGNATURE)) {
|
||||
try bin_reader.open(fd);
|
||||
config.binreader = true;
|
||||
} else {
|
||||
json_import.import(fd, &buf);
|
||||
fd.close();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn main() void {
|
||||
ui.main_thread = std.Thread.getCurrentId();
|
||||
|
||||
// Grab thousands_sep from the current C locale.
|
||||
_ = c.setlocale(c.LC_ALL, "");
|
||||
if (c.localeconv()) |locale| {
|
||||
if (locale.*.thousands_sep) |sep| {
|
||||
const span = std.mem.sliceTo(sep, 0);
|
||||
if (span.len > 0)
|
||||
config.thousands_sep = span;
|
||||
}
|
||||
}
|
||||
|
||||
const loadConf = blk: {
|
||||
var args = std.process.ArgIteratorPosix.init();
|
||||
while (args.next()) |a|
|
||||
if (std.mem.eql(u8, a, "--ignore-config"))
|
||||
break :blk false;
|
||||
break :blk true;
|
||||
};
|
||||
|
||||
if (loadConf) {
|
||||
tryReadArgsFile("/etc/ncdu.conf");
|
||||
|
||||
if (std.posix.getenvZ("XDG_CONFIG_HOME")) |p| {
|
||||
const path = std.fs.path.joinZ(allocator, &.{p, "ncdu", "config"}) catch unreachable;
|
||||
defer allocator.free(path);
|
||||
tryReadArgsFile(path);
|
||||
} else if (std.posix.getenvZ("HOME")) |p| {
|
||||
const path = std.fs.path.joinZ(allocator, &.{p, ".config", "ncdu", "config"}) catch unreachable;
|
||||
defer allocator.free(path);
|
||||
tryReadArgsFile(path);
|
||||
}
|
||||
}
|
||||
|
||||
var scan_dir: ?[:0]const u8 = null;
|
||||
var import_file: ?[:0]const u8 = null;
|
||||
var export_json: ?[:0]const u8 = null;
|
||||
var export_bin: ?[:0]const u8 = null;
|
||||
var quit_after_scan = false;
|
||||
{
|
||||
const arglist = std.process.argsAlloc(allocator) catch unreachable;
|
||||
defer std.process.argsFree(allocator, arglist);
|
||||
var args = Args.init(arglist);
|
||||
_ = args.next() catch unreachable; // program name
|
||||
while (args.next() catch unreachable) |opt| {
|
||||
if (!opt.opt) {
|
||||
// XXX: ncdu 1.x doesn't error, it just silently ignores all but the last argument.
|
||||
if (scan_dir != null) ui.die("Multiple directories given, see ncdu -h for help.\n", .{});
|
||||
scan_dir = allocator.dupeZ(u8, opt.val) catch unreachable;
|
||||
continue;
|
||||
}
|
||||
if (opt.is("-h") or opt.is("-?") or opt.is("--help")) help()
|
||||
else if (opt.is("-v") or opt.is("-V") or opt.is("--version")) version()
|
||||
else if (opt.is("-o") and (export_json != null or export_bin != null)) ui.die("The -o flag can only be given once.\n", .{})
|
||||
else if (opt.is("-o")) export_json = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
|
||||
else if (opt.is("-O") and (export_json != null or export_bin != null)) ui.die("The -O flag can only be given once.\n", .{})
|
||||
else if (opt.is("-O")) export_bin = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
|
||||
else if (opt.is("-f") and import_file != null) ui.die("The -f flag can only be given once.\n", .{})
|
||||
else if (opt.is("-f")) import_file = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
|
||||
else if (opt.is("--ignore-config")) {}
|
||||
else if (opt.is("--quit-after-scan")) quit_after_scan = true // undocumented feature to help with benchmarking scan/import
|
||||
else if (argConfig(&args, opt, false)) |_| {}
|
||||
else |_| ui.die("Unrecognized option '{s}'.\n", .{opt.val});
|
||||
}
|
||||
}
|
||||
|
||||
if (config.threads == 0) config.threads = std.Thread.getCpuCount() catch 1;
|
||||
|
||||
if (@import("builtin").os.tag != .linux and config.exclude_kernfs)
|
||||
ui.die("The --exclude-kernfs flag is currently only supported on Linux.\n", .{});
|
||||
|
||||
const out_tty = stdout.isTty();
|
||||
const in_tty = stdin.isTty();
|
||||
if (config.scan_ui == null) {
|
||||
if (export_json orelse export_bin) |f| {
|
||||
if (!out_tty or std.mem.eql(u8, f, "-")) config.scan_ui = .none
|
||||
else config.scan_ui = .line;
|
||||
} else config.scan_ui = .full;
|
||||
}
|
||||
if (!in_tty and import_file == null and export_json == null and export_bin == null and !quit_after_scan)
|
||||
ui.die("Standard input is not a TTY. Did you mean to import a file using '-f -'?\n", .{});
|
||||
config.nc_tty = !in_tty or (if (export_json orelse export_bin) |f| std.mem.eql(u8, f, "-") else false);
|
||||
|
||||
event_delay_timer = std.time.Timer.start() catch unreachable;
|
||||
defer ui.deinit();
|
||||
|
||||
if (export_json) |f| {
|
||||
const file =
|
||||
if (std.mem.eql(u8, f, "-")) stdout
|
||||
else std.fs.cwd().createFileZ(f, .{})
|
||||
catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)});
|
||||
json_export.setupOutput(file);
|
||||
sink.global.sink = .json;
|
||||
} else if (export_bin) |f| {
|
||||
const file =
|
||||
if (std.mem.eql(u8, f, "-")) stdout
|
||||
else std.fs.cwd().createFileZ(f, .{})
|
||||
catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)});
|
||||
bin_export.setupOutput(file);
|
||||
sink.global.sink = .bin;
|
||||
}
|
||||
|
||||
if (import_file) |f| {
|
||||
readImport(f) catch |e| ui.die("Error reading file '{s}': {s}.\n", .{f, ui.errorString(e)});
|
||||
config.imported = true;
|
||||
if (config.binreader and (export_json != null or export_bin != null))
|
||||
bin_reader.import();
|
||||
} else {
|
||||
var buf: [std.fs.max_path_bytes+1]u8 = @splat(0);
|
||||
const path =
|
||||
if (std.posix.realpathZ(scan_dir orelse ".", buf[0..buf.len-1])) |p| buf[0..p.len:0]
|
||||
else |_| (scan_dir orelse ".");
|
||||
scan.scan(path) catch |e| ui.die("Error opening directory: {s}.\n", .{ui.errorString(e)});
|
||||
}
|
||||
if (quit_after_scan or export_json != null or export_bin != null) return;
|
||||
|
||||
config.can_shell = config.can_shell orelse !config.imported;
|
||||
config.can_delete = config.can_delete orelse !config.imported;
|
||||
config.can_refresh = config.can_refresh orelse !config.imported;
|
||||
|
||||
config.scan_ui = .full; // in case we're refreshing from the UI, always in full mode.
|
||||
ui.init();
|
||||
state = .browse;
|
||||
browser.initRoot();
|
||||
|
||||
while (true) {
|
||||
switch (state) {
|
||||
.refresh => {
|
||||
var full_path: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer full_path.deinit(allocator);
|
||||
mem_sink.global.root.?.fmtPath(allocator, true, &full_path);
|
||||
scan.scan(util.arrayListBufZ(&full_path, allocator)) catch {
|
||||
sink.global.last_error = allocator.dupeZ(u8, full_path.items) catch unreachable;
|
||||
sink.global.state = .err;
|
||||
while (state == .refresh) handleEvent(true, true);
|
||||
};
|
||||
state = .browse;
|
||||
browser.loadDir(0);
|
||||
},
|
||||
.shell => {
|
||||
const shell = std.posix.getenvZ("NCDU_SHELL") orelse std.posix.getenvZ("SHELL") orelse "/bin/sh";
|
||||
var env = std.process.getEnvMap(allocator) catch unreachable;
|
||||
defer env.deinit();
|
||||
ui.runCmd(&.{shell}, browser.dir_path, &env, false);
|
||||
state = .browse;
|
||||
},
|
||||
.delete => {
|
||||
const next = delete.delete();
|
||||
if (state != .refresh) {
|
||||
state = .browse;
|
||||
browser.loadDir(if (next) |n| n.nameHash() else 0);
|
||||
}
|
||||
},
|
||||
else => handleEvent(true, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub var event_delay_timer: std.time.Timer = undefined;
|
||||
|
||||
// Draw the screen and handle the next input event.
|
||||
// In non-blocking mode, screen drawing is rate-limited to keep this function fast.
|
||||
pub fn handleEvent(block: bool, force_draw: bool) void {
|
||||
while (ui.oom_threads.load(.monotonic) > 0) ui.oom();
|
||||
|
||||
if (block or force_draw or event_delay_timer.read() > config.update_delay) {
|
||||
if (ui.inited) _ = c.erase();
|
||||
switch (state) {
|
||||
.scan, .refresh => sink.draw(),
|
||||
.browse => browser.draw(),
|
||||
.delete => delete.draw(),
|
||||
.shell => unreachable,
|
||||
}
|
||||
if (ui.inited) _ = c.refresh();
|
||||
event_delay_timer.reset();
|
||||
}
|
||||
if (!ui.inited) {
|
||||
std.debug.assert(!block);
|
||||
return;
|
||||
}
|
||||
|
||||
var firstblock = block;
|
||||
while (true) {
|
||||
const ch = ui.getch(firstblock);
|
||||
if (ch == 0) return;
|
||||
if (ch == -1) return handleEvent(firstblock, true);
|
||||
switch (state) {
|
||||
.scan, .refresh => sink.keyInput(ch),
|
||||
.browse => browser.keyInput(ch),
|
||||
.delete => delete.keyInput(ch),
|
||||
.shell => unreachable,
|
||||
}
|
||||
firstblock = false;
|
||||
}
|
||||
}
|
||||
|
||||
test "argument parser" {
|
||||
const lst = [_][:0]const u8{ "a", "-abcd=e", "--opt1=arg1", "--opt2", "arg2", "-x", "foo", "", "--", "--arg", "", "-", };
|
||||
const T = struct {
|
||||
a: Args,
|
||||
fn opt(self: *@This(), isopt: bool, val: []const u8) !void {
|
||||
const o = (self.a.next() catch unreachable).?;
|
||||
try std.testing.expectEqual(isopt, o.opt);
|
||||
try std.testing.expectEqualStrings(val, o.val);
|
||||
try std.testing.expectEqual(o.is(val), isopt);
|
||||
}
|
||||
fn arg(self: *@This(), val: []const u8) !void {
|
||||
try std.testing.expectEqualStrings(val, self.a.arg() catch unreachable);
|
||||
}
|
||||
};
|
||||
var t = T{ .a = Args.init(&lst) };
|
||||
try t.opt(false, "a");
|
||||
try t.opt(true, "-a");
|
||||
try t.opt(true, "-b");
|
||||
try t.arg("cd=e");
|
||||
try t.opt(true, "--opt1");
|
||||
try t.arg("arg1");
|
||||
try t.opt(true, "--opt2");
|
||||
try t.arg("arg2");
|
||||
try t.opt(true, "-x");
|
||||
try t.arg("foo");
|
||||
try t.opt(false, "");
|
||||
try t.opt(false, "--arg");
|
||||
try t.opt(false, "");
|
||||
try t.opt(false, "-");
|
||||
try std.testing.expectEqual(t.a.next(), null);
|
||||
}
|
||||
src/mem_sink.zig (new file, 212 lines)
@@ -0,0 +1,212 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const model = @import("model.zig");
|
||||
const sink = @import("sink.zig");
|
||||
|
||||
|
||||
pub const global = struct {
|
||||
pub var root: ?*model.Dir = null;
|
||||
pub var stats: bool = true; // calculate aggregate directory stats
|
||||
};
|
||||
|
||||
pub const Thread = struct {
|
||||
// Arena allocator for model.Entry structs, these are never freed.
|
||||
arena: std.heap.ArenaAllocator = std.heap.ArenaAllocator.init(std.heap.page_allocator),
|
||||
};
|
||||
|
||||
pub fn statToEntry(stat: *const sink.Stat, e: *model.Entry, parent: *model.Dir) void {
|
||||
e.pack.blocks = stat.blocks;
|
||||
e.size = stat.size;
|
||||
if (e.dir()) |d| {
|
||||
d.parent = parent;
|
||||
d.pack.dev = model.devices.getId(stat.dev);
|
||||
}
|
||||
if (e.link()) |l| {
|
||||
l.parent = parent;
|
||||
l.ino = stat.ino;
|
||||
l.pack.nlink = stat.nlink;
|
||||
model.inodes.lock.lock();
|
||||
defer model.inodes.lock.unlock();
|
||||
l.addLink();
|
||||
}
|
||||
if (e.ext()) |ext| ext.* = stat.ext;
|
||||
}
|
||||
|
||||
pub const Dir = struct {
|
||||
dir: *model.Dir,
|
||||
entries: Map,
|
||||
|
||||
own_blocks: model.Blocks,
|
||||
own_bytes: u64,
|
||||
|
||||
// Additional counts collected from subdirectories. Subdirs may run final()
|
||||
// from separate threads so these need to be protected.
|
||||
blocks: model.Blocks = 0,
|
||||
bytes: u64 = 0,
|
||||
items: u32 = 0,
|
||||
mtime: u64 = 0,
|
||||
suberr: bool = false,
|
||||
lock: std.Thread.Mutex = .{},
|
||||
|
||||
const Map = std.HashMap(*model.Entry, void, HashContext, 80);
|
||||
|
||||
const HashContext = struct {
|
||||
pub fn hash(_: @This(), e: *model.Entry) u64 {
|
||||
return std.hash.Wyhash.hash(0, e.name());
|
||||
}
|
||||
pub fn eql(_: @This(), a: *model.Entry, b: *model.Entry) bool {
|
||||
return a == b or std.mem.eql(u8, a.name(), b.name());
|
||||
}
|
||||
};
|
||||
|
||||
const HashContextAdapted = struct {
|
||||
pub fn hash(_: @This(), v: []const u8) u64 {
|
||||
return std.hash.Wyhash.hash(0, v);
|
||||
}
|
||||
pub fn eql(_: @This(), a: []const u8, b: *model.Entry) bool {
|
||||
return std.mem.eql(u8, a, b.name());
|
||||
}
|
||||
};
|
||||
|
||||
fn init(dir: *model.Dir) Dir {
|
||||
var self = Dir{
|
||||
.dir = dir,
|
||||
.entries = Map.initContext(main.allocator, HashContext{}),
|
||||
.own_blocks = dir.entry.pack.blocks,
|
||||
.own_bytes = dir.entry.size,
|
||||
};
|
||||
|
||||
var count: Map.Size = 0;
|
||||
var it = dir.sub.ptr;
|
||||
while (it) |e| : (it = e.next.ptr) count += 1;
|
||||
self.entries.ensureUnusedCapacity(count) catch unreachable;
|
||||
|
||||
it = dir.sub.ptr;
|
||||
while (it) |e| : (it = e.next.ptr)
|
||||
self.entries.putAssumeCapacity(e, {});
|
||||
return self;
|
||||
}
|
||||
|
||||
fn getEntry(self: *Dir, t: *Thread, etype: model.EType, isext: bool, name: []const u8) *model.Entry {
|
||||
if (self.entries.getKeyAdapted(name, HashContextAdapted{})) |e| {
|
||||
// XXX: In-place conversion may be possible in some cases.
|
||||
if (e.pack.etype.base() == etype.base() and (!isext or e.pack.isext)) {
|
||||
e.pack.etype = etype;
|
||||
e.pack.isext = isext;
|
||||
_ = self.entries.removeAdapted(name, HashContextAdapted{});
|
||||
return e;
|
||||
}
|
||||
}
|
||||
const e = model.Entry.create(t.arena.allocator(), etype, isext, name);
|
||||
e.next.ptr = self.dir.sub.ptr;
|
||||
self.dir.sub.ptr = e;
|
||||
return e;
|
||||
}
|
||||
|
||||
pub fn addSpecial(self: *Dir, t: *Thread, name: []const u8, st: model.EType) void {
|
||||
self.dir.items += 1;
|
||||
if (st == .err) self.dir.pack.suberr = true;
|
||||
_ = self.getEntry(t, st, false, name);
|
||||
}
|
||||
|
||||
pub fn addStat(self: *Dir, t: *Thread, name: []const u8, stat: *const sink.Stat) *model.Entry {
|
||||
if (global.stats) {
|
||||
self.dir.items +|= 1;
|
||||
if (stat.etype != .link) {
|
||||
self.dir.entry.pack.blocks +|= stat.blocks;
|
||||
self.dir.entry.size +|= stat.size;
|
||||
}
|
||||
if (self.dir.entry.ext()) |e| {
|
||||
if (stat.ext.mtime > e.mtime) e.mtime = stat.ext.mtime;
|
||||
}
|
||||
}
|
||||
|
||||
const e = self.getEntry(t, stat.etype, main.config.extended and !stat.ext.isEmpty(), name);
|
||||
statToEntry(stat, e, self.dir);
|
||||
return e;
|
||||
}
|
||||
|
||||
pub fn addDir(self: *Dir, t: *Thread, name: []const u8, stat: *const sink.Stat) Dir {
|
||||
return init(self.addStat(t, name, stat).dir().?);
|
||||
}
|
||||
|
||||
pub fn setReadError(self: *Dir) void {
|
||||
self.dir.pack.err = true;
|
||||
}
|
||||
|
||||
pub fn final(self: *Dir, parent: ?*Dir) void {
|
||||
// Remove entries we've not seen
|
||||
if (self.entries.count() > 0) {
|
||||
var it = &self.dir.sub.ptr;
|
||||
while (it.*) |e| {
|
||||
if (self.entries.getKey(e) == e) it.* = e.next.ptr
|
||||
else it = &e.next.ptr;
|
||||
}
|
||||
}
|
||||
self.entries.deinit();
|
||||
|
||||
if (!global.stats) return;
|
||||
|
||||
// Grab counts collected from subdirectories
|
||||
self.dir.entry.pack.blocks +|= self.blocks;
|
||||
self.dir.entry.size +|= self.bytes;
|
||||
self.dir.items +|= self.items;
|
||||
if (self.suberr) self.dir.pack.suberr = true;
|
||||
if (self.dir.entry.ext()) |e| {
|
||||
if (self.mtime > e.mtime) e.mtime = self.mtime;
|
||||
}
|
||||
|
||||
// Add own counts to parent
|
||||
if (parent) |p| {
|
||||
p.lock.lock();
|
||||
defer p.lock.unlock();
|
||||
p.blocks +|= self.dir.entry.pack.blocks - self.own_blocks;
|
||||
p.bytes +|= self.dir.entry.size - self.own_bytes;
|
||||
p.items +|= self.dir.items;
|
||||
if (self.dir.entry.ext()) |e| {
|
||||
if (e.mtime > p.mtime) p.mtime = e.mtime;
|
||||
}
|
||||
if (self.suberr or self.dir.pack.suberr or self.dir.pack.err) p.suberr = true;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub fn createRoot(path: []const u8, stat: *const sink.Stat) Dir {
|
||||
const p = global.root orelse blk: {
|
||||
model.root = model.Entry.create(main.allocator, .dir, main.config.extended and !stat.ext.isEmpty(), path).dir().?;
|
||||
break :blk model.root;
|
||||
};
|
||||
sink.global.state = .zeroing;
|
||||
if (p.items > 10_000) main.handleEvent(false, true);
|
||||
// Do the zeroStats() here, after the "root" entry has been
|
||||
// stat'ed and opened, so that a fatal error on refresh won't
|
||||
// zero-out the requested directory.
|
||||
p.entry.zeroStats(p.parent);
|
||||
sink.global.state = .running;
|
||||
p.entry.pack.blocks = stat.blocks;
|
||||
p.entry.size = stat.size;
|
||||
p.pack.dev = model.devices.getId(stat.dev);
|
||||
if (p.entry.ext()) |e| e.* = stat.ext;
|
||||
return Dir.init(p);
|
||||
}
|
||||
|
||||
pub fn done() void {
|
||||
if (!global.stats) return;
|
||||
|
||||
sink.global.state = .hlcnt;
|
||||
main.handleEvent(false, true);
|
||||
const dir = global.root orelse model.root;
|
||||
var it: ?*model.Dir = dir;
|
||||
while (it) |p| : (it = p.parent) {
|
||||
p.updateSubErr();
|
||||
if (p != dir) {
|
||||
p.entry.pack.blocks +|= dir.entry.pack.blocks;
|
||||
p.entry.size +|= dir.entry.size;
|
||||
p.items +|= dir.items + 1;
|
||||
}
|
||||
}
|
||||
model.inodes.addAllStats();
|
||||
}
|
||||
73  src/mem_src.zig  Normal file
@@ -0,0 +1,73 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const model = @import("model.zig");
|
||||
const sink = @import("sink.zig");
|
||||
|
||||
// Emit the memory tree to the sink in depth-first order from a single thread,
|
||||
// suitable for JSON export.
|
||||
|
||||
fn toStat(e: *model.Entry) sink.Stat {
|
||||
const el = e.link();
|
||||
return sink.Stat{
|
||||
.etype = e.pack.etype,
|
||||
.blocks = e.pack.blocks,
|
||||
.size = e.size,
|
||||
.dev =
|
||||
if (e.dir()) |d| model.devices.list.items[d.pack.dev]
|
||||
else if (el) |l| model.devices.list.items[l.parent.pack.dev]
|
||||
else undefined,
|
||||
.ino = if (el) |l| l.ino else undefined,
|
||||
.nlink = if (el) |l| l.pack.nlink else 1,
|
||||
.ext = if (e.ext()) |x| x.* else .{},
|
||||
};
|
||||
}
|
||||
|
||||
const Ctx = struct {
|
||||
sink: *sink.Thread,
|
||||
stat: sink.Stat,
|
||||
};
|
||||
|
||||
|
||||
fn rec(ctx: *Ctx, dir: *sink.Dir, entry: *model.Entry) void {
|
||||
if ((ctx.sink.files_seen.load(.monotonic) & 65) == 0)
|
||||
main.handleEvent(false, false);
|
||||
|
||||
ctx.stat = toStat(entry);
|
||||
switch (entry.pack.etype) {
|
||||
.dir => {
|
||||
const d = entry.dir().?;
|
||||
var ndir = dir.addDir(ctx.sink, entry.name(), &ctx.stat);
|
||||
ctx.sink.setDir(ndir);
|
||||
if (d.pack.err) ndir.setReadError(ctx.sink);
|
||||
var it = d.sub.ptr;
|
||||
while (it) |e| : (it = e.next.ptr) rec(ctx, ndir, e);
|
||||
ctx.sink.setDir(dir);
|
||||
ndir.unref(ctx.sink);
|
||||
},
|
||||
.reg, .nonreg, .link => dir.addStat(ctx.sink, entry.name(), &ctx.stat),
|
||||
else => dir.addSpecial(ctx.sink, entry.name(), entry.pack.etype),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub fn run(d: *model.Dir) void {
|
||||
const sink_threads = sink.createThreads(1);
|
||||
|
||||
var ctx: Ctx = .{
|
||||
.sink = &sink_threads[0],
|
||||
.stat = toStat(&d.entry),
|
||||
};
|
||||
var buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
d.fmtPath(main.allocator, true, &buf);
|
||||
const root = sink.createRoot(buf.items, &ctx.stat);
|
||||
buf.deinit(main.allocator);
|
||||
|
||||
var it = d.sub.ptr;
|
||||
while (it) |e| : (it = e.next.ptr) rec(&ctx, root, e);
|
||||
|
||||
root.unref(ctx.sink);
|
||||
sink.done();
|
||||
}
|
||||
513  src/model.zig  Normal file
@@ -0,0 +1,513 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const util = @import("util.zig");
|
||||
|
||||
// Numbers are used in the binfmt export, so must be stable.
|
||||
pub const EType = enum(i3) {
|
||||
dir = 0,
|
||||
reg = 1,
|
||||
nonreg = 2,
|
||||
link = 3,
|
||||
err = -1,
|
||||
pattern = -2,
|
||||
otherfs = -3,
|
||||
kernfs = -4,
|
||||
|
||||
pub fn base(t: EType) EType {
|
||||
return switch (t) {
|
||||
.dir, .link => t,
|
||||
else => .reg,
|
||||
};
|
||||
}
|
||||
|
||||
// Whether this entry should be displayed as a "directory".
|
||||
// Some dirs are actually represented in this data model as a File for efficiency.
|
||||
pub fn isDirectory(t: EType) bool {
|
||||
return switch (t) {
|
||||
.dir, .otherfs, .kernfs => true,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
// Type for the Entry.Packed.blocks field. Smaller than a u64 to make room for flags.
|
||||
pub const Blocks = u60;
|
||||
|
||||
// Entries read from bin_reader may refer to other entries by itemref rather than pointer.
|
||||
// This is a hack that allows browser.zig to use the same types for in-memory
|
||||
// and bin_reader-backed directory trees. Most code can only deal with
|
||||
// in-memory trees and accesses the .ptr field directly.
|
||||
pub const Ref = extern union {
|
||||
ptr: ?*Entry align(1),
|
||||
ref: u64 align(1),
|
||||
|
||||
pub fn isNull(r: Ref) bool {
|
||||
if (main.config.binreader) return r.ref == std.math.maxInt(u64)
|
||||
else return r.ptr == null;
|
||||
}
|
||||
};
|
||||
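// [Editorial sketch, not part of the original change] Typical use of Ref as
// described above: generic code checks isNull()/ref, while the in-memory scan
// and sink code walks sibling lists through .ptr directly. Illustrative only:
fn countSubSketch(d: *Dir) usize {
    var n: usize = 0;
    var it = d.sub.ptr; // valid for in-memory trees; bin_reader trees use .ref
    while (it) |e| : (it = e.next.ptr) n += 1;
    return n;
}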
|
||||
// Memory layout:
|
||||
// (Ext +) Dir + name
|
||||
// or: (Ext +) Link + name
|
||||
// or: (Ext +) File + name
|
||||
//
|
||||
// Entry is always the first part of Dir, Link and File, so a pointer cast to
|
||||
// *Entry is always safe and an *Entry can be casted to the full type. The Ext
|
||||
// struct, if present, is placed before the *Entry pointer.
|
||||
// These are all packed structs and hence do not have any alignment, which is
|
||||
// great for saving memory but perhaps not very great for code size or
|
||||
// performance.
|
||||
pub const Entry = extern struct {
|
||||
pack: Packed align(1),
|
||||
size: u64 align(1) = 0,
|
||||
next: Ref = .{ .ptr = null },
|
||||
|
||||
pub const Packed = packed struct(u64) {
|
||||
etype: EType,
|
||||
isext: bool,
|
||||
blocks: Blocks = 0, // 512-byte blocks
|
||||
};
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn dir(self: *Self) ?*Dir {
|
||||
return if (self.pack.etype == .dir) @ptrCast(self) else null;
|
||||
}
|
||||
|
||||
pub fn link(self: *Self) ?*Link {
|
||||
return if (self.pack.etype == .link) @ptrCast(self) else null;
|
||||
}
|
||||
|
||||
pub fn file(self: *Self) ?*File {
|
||||
return if (self.pack.etype != .dir and self.pack.etype != .link) @ptrCast(self) else null;
|
||||
}
|
||||
|
||||
pub fn name(self: *const Self) [:0]const u8 {
|
||||
const self_name = switch (self.pack.etype) {
|
||||
.dir => &@as(*const Dir, @ptrCast(self)).name,
|
||||
.link => &@as(*const Link, @ptrCast(self)).name,
|
||||
else => &@as(*const File, @ptrCast(self)).name,
|
||||
};
|
||||
const name_ptr: [*:0]const u8 = @ptrCast(self_name);
|
||||
return std.mem.sliceTo(name_ptr, 0);
|
||||
}
|
||||
|
||||
pub fn nameHash(self: *const Self) u64 {
|
||||
return std.hash.Wyhash.hash(0, self.name());
|
||||
}
|
||||
|
||||
pub fn ext(self: *Self) ?*Ext {
|
||||
if (!self.pack.isext) return null;
|
||||
return @ptrCast(@as([*]Ext, @ptrCast(self)) - 1);
|
||||
}
|
||||
|
||||
fn alloc(comptime T: type, allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
|
||||
const size = (if (isext) @as(usize, @sizeOf(Ext)) else 0) + @sizeOf(T) + ename.len + 1;
|
||||
var ptr = blk: while (true) {
|
||||
const alignment = if (@typeInfo(@TypeOf(std.mem.Allocator.allocWithOptions)).@"fn".params[3].type == ?u29) 1 else std.mem.Alignment.@"1";
|
||||
if (allocator.allocWithOptions(u8, size, alignment, null)) |p| break :blk p
|
||||
else |_| {}
|
||||
ui.oom();
|
||||
};
|
||||
if (isext) {
|
||||
@as(*Ext, @ptrCast(ptr)).* = .{};
|
||||
ptr = ptr[@sizeOf(Ext)..];
|
||||
}
|
||||
const e: *T = @ptrCast(ptr);
|
||||
e.* = .{ .entry = .{ .pack = .{ .etype = etype, .isext = isext } } };
|
||||
const n = @as([*]u8, @ptrCast(&e.name))[0..ename.len+1];
|
||||
@memcpy(n[0..ename.len], ename);
|
||||
n[ename.len] = 0;
|
||||
return &e.entry;
|
||||
}
|
||||
|
||||
pub fn create(allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
|
||||
return switch (etype) {
|
||||
.dir => alloc(Dir, allocator, etype, isext, ename),
|
||||
.link => alloc(Link, allocator, etype, isext, ename),
|
||||
else => alloc(File, allocator, etype, isext, ename),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn destroy(self: *Self, allocator: std.mem.Allocator) void {
|
||||
const ptr: [*]u8 = if (self.ext()) |e| @ptrCast(e) else @ptrCast(self);
|
||||
const esize: usize = switch (self.pack.etype) {
|
||||
.dir => @sizeOf(Dir),
|
||||
.link => @sizeOf(Link),
|
||||
else => @sizeOf(File),
|
||||
};
|
||||
const size = (if (self.pack.isext) @as(usize, @sizeOf(Ext)) else 0) + esize + self.name().len + 1;
|
||||
allocator.free(ptr[0..size]);
|
||||
}
|
||||
|
||||
fn hasErr(self: *Self) bool {
|
||||
return
|
||||
if(self.dir()) |d| d.pack.err or d.pack.suberr
|
||||
else self.pack.etype == .err;
|
||||
}
|
||||
|
||||
fn removeLinks(self: *Entry) void {
|
||||
if (self.dir()) |d| {
|
||||
var it = d.sub.ptr;
|
||||
while (it) |e| : (it = e.next.ptr) e.removeLinks();
|
||||
}
|
||||
if (self.link()) |l| l.removeLink();
|
||||
}
|
||||
|
||||
fn zeroStatsRec(self: *Entry) void {
|
||||
self.pack.blocks = 0;
|
||||
self.size = 0;
|
||||
if (self.dir()) |d| {
|
||||
d.items = 0;
|
||||
d.pack.err = false;
|
||||
d.pack.suberr = false;
|
||||
var it = d.sub.ptr;
|
||||
while (it) |e| : (it = e.next.ptr) e.zeroStatsRec();
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively sets this entry's stats and those of its sub-items to zero and removes counts
|
||||
// from parent directories; as if this item does not exist in the tree.
|
||||
// XXX: Does not update the 'suberr' flag of parent directories, make sure
|
||||
// to call updateSubErr() afterwards.
|
||||
pub fn zeroStats(self: *Entry, parent: ?*Dir) void {
|
||||
self.removeLinks();
|
||||
|
||||
var it = parent;
|
||||
while (it) |p| : (it = p.parent) {
|
||||
p.entry.pack.blocks -|= self.pack.blocks;
|
||||
p.entry.size -|= self.size;
|
||||
p.items -|= 1 + (if (self.dir()) |d| d.items else 0);
|
||||
}
|
||||
self.zeroStatsRec();
|
||||
}
|
||||
};
|
||||
|
||||
const DevId = u30; // Can be reduced to make room for more flags in Dir.Packed.
|
||||
|
||||
pub const Dir = extern struct {
|
||||
entry: Entry,
|
||||
|
||||
sub: Ref = .{ .ptr = null },
|
||||
parent: ?*Dir align(1) = null,
|
||||
|
||||
// entry.{blocks,size}: Total size of all unique files + dirs. Non-shared hardlinks are counted only once.
|
||||
// (i.e. the space you'll need if you created a filesystem with only this dir)
|
||||
// shared_*: Unique hardlinks that still have references outside of this directory.
|
||||
// (i.e. the space you won't reclaim by deleting this dir)
|
||||
// (space reclaimed by deleting a dir =~ entry. - shared_)
|
||||
shared_blocks: u64 align(1) = 0,
|
||||
shared_size: u64 align(1) = 0,
|
||||
items: u32 align(1) = 0,
|
||||
|
||||
pack: Packed align(1) = .{},
|
||||
|
||||
// Only used to find the @offsetOf, the name is written at this point as a 0-terminated string.
|
||||
// (Old C habits die hard)
|
||||
name: [0]u8 = undefined,
|
||||
|
||||
pub const Packed = packed struct {
|
||||
// Indexes into the global 'devices.list' array
|
||||
dev: DevId = 0,
|
||||
err: bool = false,
|
||||
suberr: bool = false,
|
||||
};
|
||||
|
||||
pub fn fmtPath(self: *const @This(), alloc: std.mem.Allocator, withRoot: bool, out: *std.ArrayListUnmanaged(u8)) void {
|
||||
if (!withRoot and self.parent == null) return;
|
||||
var components: std.ArrayListUnmanaged([:0]const u8) = .empty;
|
||||
defer components.deinit(main.allocator);
|
||||
var it: ?*const @This() = self;
|
||||
while (it) |e| : (it = e.parent)
|
||||
if (withRoot or e.parent != null)
|
||||
components.append(main.allocator, e.entry.name()) catch unreachable;
|
||||
|
||||
var i: usize = components.items.len-1;
|
||||
while (true) {
|
||||
if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/'))
|
||||
out.append(main.allocator, '/') catch unreachable;
|
||||
out.appendSlice(alloc, components.items[i]) catch unreachable;
|
||||
if (i == 0) break;
|
||||
i -= 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Only updates the suberr of this Dir, assumes child dirs have already
|
||||
// been updated and does not propagate to parents.
|
||||
pub fn updateSubErr(self: *@This()) void {
|
||||
self.pack.suberr = false;
|
||||
var sub = self.sub.ptr;
|
||||
while (sub) |e| : (sub = e.next.ptr) {
|
||||
if (e.hasErr()) {
|
||||
self.pack.suberr = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
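// [Editorial sketch, not part of the original change] Worked example of the
// shared_* accounting documented in the Dir fields above: the space reclaimed
// by deleting a directory is roughly its total size minus the part still
// referenced from outside via hardlinks.
fn reclaimableSizeSketch(d: *const Dir) u64 {
    // entry.size counts every unique file once; shared_size is the subset
    // whose inodes still have links outside this directory.
    return d.entry.size -| d.shared_size;
}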
|
||||
// File that's been hardlinked (i.e. nlink > 1)
|
||||
pub const Link = extern struct {
|
||||
entry: Entry,
|
||||
parent: *Dir align(1) = undefined,
|
||||
next: *Link align(1) = undefined, // circular linked list of all *Link nodes with the same dev,ino.
|
||||
prev: *Link align(1) = undefined,
|
||||
// dev is inherited from the parent Dir
|
||||
ino: u64 align(1) = undefined,
|
||||
pack: Pack align(1) = .{},
|
||||
name: [0]u8 = undefined,
|
||||
|
||||
const Pack = packed struct(u32) {
|
||||
// Whether this Inode is counted towards the parent directories.
|
||||
// Is kept synchronized between all Link nodes with the same dev/ino.
|
||||
counted: bool = false,
|
||||
// Number of links for this inode. When set to '0', we don't know the
|
||||
// actual nlink count; which happens for old JSON dumps.
|
||||
nlink: u31 = undefined,
|
||||
};
|
||||
|
||||
// Return value should be freed with main.allocator.
|
||||
pub fn path(self: *const @This(), withRoot: bool) [:0]const u8 {
|
||||
var out: std.ArrayListUnmanaged(u8) = .empty;
|
||||
self.parent.fmtPath(main.allocator, withRoot, &out);
|
||||
out.append(main.allocator, '/') catch unreachable;
|
||||
out.appendSlice(main.allocator, self.entry.name()) catch unreachable;
|
||||
return out.toOwnedSliceSentinel(main.allocator, 0) catch unreachable;
|
||||
}
|
||||
|
||||
// Add this link to the inodes map and mark it as 'uncounted'.
|
||||
pub fn addLink(l: *@This()) void {
|
||||
const d = inodes.map.getOrPut(l) catch unreachable;
|
||||
if (!d.found_existing) {
|
||||
l.next = l;
|
||||
l.prev = l;
|
||||
} else {
|
||||
inodes.setStats(d.key_ptr.*, false);
|
||||
l.next = d.key_ptr.*;
|
||||
l.prev = d.key_ptr.*.prev;
|
||||
l.next.prev = l;
|
||||
l.prev.next = l;
|
||||
}
|
||||
inodes.addUncounted(l);
|
||||
}
|
||||
|
||||
// Remove this link from the inodes map and remove its stats from parent directories.
|
||||
fn removeLink(l: *@This()) void {
|
||||
inodes.setStats(l, false);
|
||||
const entry = inodes.map.getEntry(l) orelse return;
|
||||
if (l.next == l) {
|
||||
_ = inodes.map.remove(l);
|
||||
_ = inodes.uncounted.remove(l);
|
||||
} else {
|
||||
// XXX: If this link is actually removed from the filesystem, then
|
||||
// the nlink count of the existing links should be updated to
|
||||
// reflect that. But we can't do that here, because this function
|
||||
// is also called before doing a filesystem refresh - in which case
|
||||
// the nlink count likely won't change. Best we can hope for is
|
||||
// that a refresh will encounter another link to the same inode and
|
||||
// trigger an nlink change.
|
||||
if (entry.key_ptr.* == l)
|
||||
entry.key_ptr.* = l.next;
|
||||
inodes.addUncounted(l.next);
|
||||
l.next.prev = l.prev;
|
||||
l.prev.next = l.next;
|
||||
}
|
||||
}
|
||||
};
|
||||
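// [Editorial sketch, not part of the original change] Walking the circular
// list of Links that share one inode, starting from any node in it; this is
// the same pattern inodes.setStats() uses further below.
fn countInodeLinksSketch(start: *Link) usize {
    var n: usize = 0;
    var it = start;
    while (true) {
        n += 1;
        it = it.next;
        if (it == start) break;
    }
    return n;
}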
|
||||
// Anything that's not an (indexed) directory or hardlink. Excluded directories are also "Files".
|
||||
pub const File = extern struct {
|
||||
entry: Entry,
|
||||
name: [0]u8 = undefined,
|
||||
};
|
||||
|
||||
pub const Ext = extern struct {
|
||||
pack: Pack = .{},
|
||||
mtime: u64 align(1) = 0,
|
||||
uid: u32 align(1) = 0,
|
||||
gid: u32 align(1) = 0,
|
||||
mode: u16 align(1) = 0,
|
||||
|
||||
pub const Pack = packed struct(u8) {
|
||||
hasmtime: bool = false,
|
||||
hasuid: bool = false,
|
||||
hasgid: bool = false,
|
||||
hasmode: bool = false,
|
||||
_pad: u4 = 0,
|
||||
};
|
||||
|
||||
pub fn isEmpty(e: *const Ext) bool {
|
||||
return !e.pack.hasmtime and !e.pack.hasuid and !e.pack.hasgid and !e.pack.hasmode;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// List of st_dev entries. Those are typically 64bits, but that's quite a waste
|
||||
// of space when a typical scan won't cover many unique devices.
|
||||
pub const devices = struct {
|
||||
var lock = std.Thread.Mutex{};
|
||||
// id -> dev
|
||||
pub var list: std.ArrayListUnmanaged(u64) = .empty;
|
||||
// dev -> id
|
||||
var lookup = std.AutoHashMap(u64, DevId).init(main.allocator);
|
||||
|
||||
pub fn getId(dev: u64) DevId {
|
||||
lock.lock();
|
||||
defer lock.unlock();
|
||||
const d = lookup.getOrPut(dev) catch unreachable;
|
||||
if (!d.found_existing) {
|
||||
if (list.items.len >= std.math.maxInt(DevId)) ui.die("Maximum number of device identifiers exceeded.\n", .{});
|
||||
d.value_ptr.* = @as(DevId, @intCast(list.items.len));
|
||||
list.append(main.allocator, dev) catch unreachable;
|
||||
}
|
||||
return d.value_ptr.*;
|
||||
}
|
||||
};
|
||||
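// [Editorial sketch, not part of the original change] Round trip through the
// device table above: a 64-bit st_dev is interned once and referenced by a
// compact DevId afterwards (as in Dir.Packed.dev).
fn devRoundTripSketch(st_dev: u64) u64 {
    const id = devices.getId(st_dev); // intern (or find) the device
    return devices.list.items[id]; // map the compact id back to the st_dev value
}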
|
||||
|
||||
// Lookup table for ino -> *Link entries, used for hard link counting.
|
||||
pub const inodes = struct {
|
||||
// Keys are hashed by their (dev,ino), the *Link points to an arbitrary
|
||||
// node in the list. Link entries with the same dev/ino are part of a
|
||||
// circular linked list, so you can iterate through all of them with this
|
||||
// single pointer.
|
||||
const Map = std.HashMap(*Link, void, HashContext, 80);
|
||||
pub var map = Map.init(main.allocator);
|
||||
|
||||
// List of nodes in 'map' with !counted, to speed up addAllStats().
|
||||
// If this list grows large relative to the number of nodes in 'map', then
|
||||
// this list is cleared and uncounted_full is set instead, so that
|
||||
// addAllStats() will do a full iteration over 'map'.
|
||||
var uncounted = std.HashMap(*Link, void, HashContext, 80).init(main.allocator);
|
||||
var uncounted_full = true; // start with true for the initial scan
|
||||
|
||||
pub var lock = std.Thread.Mutex{};
|
||||
|
||||
const HashContext = struct {
|
||||
pub fn hash(_: @This(), l: *Link) u64 {
|
||||
var h = std.hash.Wyhash.init(0);
|
||||
h.update(std.mem.asBytes(&@as(u32, l.parent.pack.dev)));
|
||||
h.update(std.mem.asBytes(&l.ino));
|
||||
return h.final();
|
||||
}
|
||||
|
||||
pub fn eql(_: @This(), a: *Link, b: *Link) bool {
|
||||
return a.ino == b.ino and a.parent.pack.dev == b.parent.pack.dev;
|
||||
}
|
||||
};
|
||||
|
||||
fn addUncounted(l: *Link) void {
|
||||
if (uncounted_full) return;
|
||||
if (uncounted.count() > map.count()/8) {
|
||||
uncounted.clearAndFree();
|
||||
uncounted_full = true;
|
||||
} else
|
||||
(uncounted.getOrPut(l) catch unreachable).key_ptr.* = l;
|
||||
}
|
||||
|
||||
// Add/remove this inode from the parent Dir sizes. When removing stats,
|
||||
// the list of *Links and their sizes and counts must be in the exact same
|
||||
// state as when the stats were added. Hence, any modification to the Link
|
||||
// state should be preceded by a setStats(.., false).
|
||||
fn setStats(l: *Link, add: bool) void {
|
||||
if (l.pack.counted == add) return;
|
||||
|
||||
var nlink: u31 = 0;
|
||||
var inconsistent = false;
|
||||
var dirs = std.AutoHashMap(*Dir, u32).init(main.allocator);
|
||||
defer dirs.deinit();
|
||||
var it = l;
|
||||
while (true) {
|
||||
it.pack.counted = add;
|
||||
nlink += 1;
|
||||
if (it.pack.nlink != l.pack.nlink) inconsistent = true;
|
||||
var parent: ?*Dir = it.parent;
|
||||
while (parent) |p| : (parent = p.parent) {
|
||||
const de = dirs.getOrPut(p) catch unreachable;
|
||||
if (de.found_existing) de.value_ptr.* += 1
|
||||
else de.value_ptr.* = 1;
|
||||
}
|
||||
it = it.next;
|
||||
if (it == l)
|
||||
break;
|
||||
}
|
||||
|
||||
// There's not many sensible things we can do when we encounter
|
||||
// inconsistent nlink counts. Current approach is to use the number of
|
||||
// times we've seen this link in our tree as fallback for when the
|
||||
// nlink counts aren't matching. May want to add a warning of some
|
||||
// sorts to the UI at some point.
|
||||
if (!inconsistent and l.pack.nlink >= nlink) nlink = l.pack.nlink;
|
||||
|
||||
// XXX: We're also not testing for inconsistent entry sizes, instead
|
||||
// using the given 'l' size for all Links. Might warrant a warning as
|
||||
// well.
|
||||
|
||||
var dir_iter = dirs.iterator();
|
||||
if (add) {
|
||||
while (dir_iter.next()) |de| {
|
||||
de.key_ptr.*.entry.pack.blocks +|= l.entry.pack.blocks;
|
||||
de.key_ptr.*.entry.size +|= l.entry.size;
|
||||
if (de.value_ptr.* < nlink) {
|
||||
de.key_ptr.*.shared_blocks +|= l.entry.pack.blocks;
|
||||
de.key_ptr.*.shared_size +|= l.entry.size;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
while (dir_iter.next()) |de| {
|
||||
de.key_ptr.*.entry.pack.blocks -|= l.entry.pack.blocks;
|
||||
de.key_ptr.*.entry.size -|= l.entry.size;
|
||||
if (de.value_ptr.* < nlink) {
|
||||
de.key_ptr.*.shared_blocks -|= l.entry.pack.blocks;
|
||||
de.key_ptr.*.shared_size -|= l.entry.size;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// counters to track progress for addAllStats()
|
||||
pub var add_total: usize = 0;
|
||||
pub var add_done: usize = 0;
|
||||
|
||||
pub fn addAllStats() void {
|
||||
if (uncounted_full) {
|
||||
add_total = map.count();
|
||||
add_done = 0;
|
||||
var it = map.keyIterator();
|
||||
while (it.next()) |e| {
|
||||
setStats(e.*, true);
|
||||
add_done += 1;
|
||||
if ((add_done & 65) == 0) main.handleEvent(false, false);
|
||||
}
|
||||
} else {
|
||||
add_total = uncounted.count();
|
||||
add_done = 0;
|
||||
var it = uncounted.keyIterator();
|
||||
while (it.next()) |u| {
|
||||
if (map.getKey(u.*)) |e| setStats(e, true);
|
||||
add_done += 1;
|
||||
if ((add_done & 65) == 0) main.handleEvent(false, false);
|
||||
}
|
||||
}
|
||||
uncounted_full = false;
|
||||
if (uncounted.count() > 0)
|
||||
uncounted.clearAndFree();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
pub var root: *Dir = undefined;
|
||||
|
||||
|
||||
test "entry" {
|
||||
var e = Entry.create(std.testing.allocator, .reg, false, "hello");
|
||||
defer e.destroy(std.testing.allocator);
|
||||
try std.testing.expectEqual(e.pack.etype, .reg);
|
||||
try std.testing.expect(!e.pack.isext);
|
||||
try std.testing.expectEqualStrings(e.name(), "hello");
|
||||
}
|
||||
160  src/ncdu.h
@@ -1,160 +0,0 @@
/* ncdu - NCurses Disk Usage
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <ctype.h>
|
||||
#include <limits.h>
|
||||
#include <errno.h>
|
||||
#include <fnmatch.h>
|
||||
|
||||
#include <ncurses.h>
|
||||
#include <form.h>
|
||||
#include <menu.h>
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/time.h>
|
||||
#include <dirent.h>
|
||||
|
||||
/* PATH_MAX 260 on Cygwin is too small for /proc/registry */
|
||||
#ifdef __CYGWIN__
|
||||
# if PATH_MAX < 1024
|
||||
# undef PATH_MAX
|
||||
# define PATH_MAX 1024
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* get PATH_MAX */
|
||||
#ifndef PATH_MAX
|
||||
# ifdef _POSIX_PATH_MAX
|
||||
# define PATH_MAX _POSIX_PATH_MAX
|
||||
# else
|
||||
# define PATH_MAX 4096
|
||||
# endif
|
||||
#endif
|
||||
/* and LINK_MAX */
|
||||
#ifndef LINK_MAX
|
||||
# ifdef _POSIX_LINK_MAX
|
||||
# define LINK_MAX _POSIX_LINK_MAX
|
||||
# else
|
||||
# define LINK_MAX 32
|
||||
# endif
|
||||
#endif
|
||||
/* check for S_ISLNK */
|
||||
#ifndef S_ISLNK
|
||||
# ifndef S_IFLNK
|
||||
# define S_IFLNK 0120000
|
||||
# endif
|
||||
# define S_ISLNK(x) (x & S_IFLNK)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* G L O B A L F L A G S
|
||||
*/
|
||||
/* File Flags (struct dir -> flags) */
|
||||
#define FF_DIR 1
|
||||
#define FF_FILE 2
|
||||
#define FF_OTHER 4
|
||||
#define FF_ERR 8
|
||||
#define FF_OTHFS 16
|
||||
#define FF_SERR 32 /* error in subdirectory */
|
||||
#define FF_BSEL 64 /* selected */
|
||||
#define FF_EXL 128 /* excluded using exclude patterns */
|
||||
#define FF_PAR 256 /* reference to parent directory (hack) */
|
||||
|
||||
/* Settings Flags (int sflags) */
|
||||
#define SF_SMFS 1 /* same filesystem */
|
||||
#define SF_AS 2 /* apparent sizes */
|
||||
#define SF_SI 4 /* use powers of 1000 instead of 1024 */
|
||||
#define SF_IGNS 8 /* ignore too small terminal sizes */
|
||||
#define SF_NOCFM 16 /* don't confirm file deletion */
|
||||
#define SF_IGNE 32 /* ignore errors when deleting */
|
||||
|
||||
/* Browse Flags (int bflags) */
|
||||
#define BF_NAME 1
|
||||
#define BF_SIZE 2
|
||||
#define BF_NDIRF 32 /* Normally, dirs before files, setting this disables it */
|
||||
#define BF_DESC 64
|
||||
#define BF_HIDE 128 /* don't show hidden files... */
|
||||
|
||||
|
||||
/*
|
||||
* S T R U C T U R E S
|
||||
*/
|
||||
struct dir {
|
||||
struct dir *parent, *next, *prev, *sub;
|
||||
char *name;
|
||||
off_t size;
|
||||
unsigned int files, dirs;
|
||||
unsigned short flags;
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* G L O B A L V A R I A B L E S
|
||||
*
|
||||
* (all defined in main.c)
|
||||
*/
|
||||
/* main directory data */
|
||||
extern struct dir dat;
|
||||
/* updated when window is resized */
|
||||
extern int winrows, wincols;
|
||||
/* global settings */
|
||||
extern char sdir[PATH_MAX];
|
||||
extern int sflags, bflags, sdelay, bgraph;
|
||||
|
||||
|
||||
/*
|
||||
* G L O B A L F U N C T I O N S
|
||||
*/
|
||||
/* util.c */
|
||||
extern char *cropdir(const char *, int);
|
||||
extern char *cropsize(const off_t);
|
||||
extern void ncresize(void);
|
||||
extern struct dir * freedir(struct dir *);
|
||||
extern char *getpath(struct dir *, char *);
|
||||
/* settings.c */
|
||||
extern int settingsCli(int, char **);
|
||||
extern int settingsWin(void);
|
||||
/* calc.c */
|
||||
extern int calcUsage();
|
||||
/* browser.c */
|
||||
extern void drawBrowser(int);
|
||||
extern void showBrowser(void);
|
||||
/* help.c */
|
||||
extern void showHelp(void);
|
||||
/* delete.c */
|
||||
extern struct dir *showDelete(struct dir *);
|
||||
/* exclude.c */
|
||||
extern void addExclude(char *);
|
||||
extern int addExcludeFile(char *);
|
||||
extern int matchExclude(char *);
|
||||
|
||||
325  src/scan.zig  Normal file
@@ -0,0 +1,325 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const util = @import("util.zig");
|
||||
const model = @import("model.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const exclude = @import("exclude.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
|
||||
// This function only works on Linux
|
||||
fn isKernfs(dir: std.fs.Dir) bool {
|
||||
var buf: c.struct_statfs = undefined;
|
||||
if (c.fstatfs(dir.fd, &buf) != 0) return false; // silently ignoring errors isn't too nice.
|
||||
const iskern = switch (util.castTruncate(u32, buf.f_type)) {
|
||||
// These numbers are documented in the Linux 'statfs(2)' man page, so I assume they're stable.
|
||||
0x42494e4d, // BINFMTFS_MAGIC
|
||||
0xcafe4a11, // BPF_FS_MAGIC
|
||||
0x27e0eb, // CGROUP_SUPER_MAGIC
|
||||
0x63677270, // CGROUP2_SUPER_MAGIC
|
||||
0x64626720, // DEBUGFS_MAGIC
|
||||
0x1cd1, // DEVPTS_SUPER_MAGIC
|
||||
0x9fa0, // PROC_SUPER_MAGIC
|
||||
0x6165676c, // PSTOREFS_MAGIC
|
||||
0x73636673, // SECURITYFS_MAGIC
|
||||
0xf97cff8c, // SELINUX_MAGIC
|
||||
0x62656572, // SYSFS_MAGIC
|
||||
0x74726163 // TRACEFS_MAGIC
|
||||
=> true,
|
||||
else => false,
|
||||
};
|
||||
return iskern;
|
||||
}
|
||||
|
||||
|
||||
fn clamp(comptime T: type, comptime field: anytype, x: anytype) std.meta.fieldInfo(T, field).type {
|
||||
return util.castClamp(std.meta.fieldInfo(T, field).type, x);
|
||||
}
|
||||
|
||||
|
||||
fn truncate(comptime T: type, comptime field: anytype, x: anytype) std.meta.fieldInfo(T, field).type {
|
||||
return util.castTruncate(std.meta.fieldInfo(T, field).type, x);
|
||||
}
|
||||
|
||||
|
||||
pub fn statAt(parent: std.fs.Dir, name: [:0]const u8, follow: bool, symlink: ?*bool) !sink.Stat {
|
||||
// std.posix.fstatatZ() in Zig 0.14 is not suitable due to https://github.com/ziglang/zig/issues/23463
|
||||
var stat: std.c.Stat = undefined;
|
||||
if (std.c.fstatat(parent.fd, name, &stat, if (follow) 0 else std.c.AT.SYMLINK_NOFOLLOW) != 0) {
|
||||
return switch (std.c._errno().*) {
|
||||
@intFromEnum(std.c.E.NOENT) => error.FileNotFound,
|
||||
@intFromEnum(std.c.E.NAMETOOLONG) => error.NameTooLong,
|
||||
@intFromEnum(std.c.E.NOMEM) => error.OutOfMemory,
|
||||
@intFromEnum(std.c.E.ACCES) => error.AccessDenied,
|
||||
else => error.Unexpected,
|
||||
};
|
||||
}
|
||||
if (symlink) |s| s.* = std.c.S.ISLNK(stat.mode);
|
||||
return sink.Stat{
|
||||
.etype =
|
||||
if (std.c.S.ISDIR(stat.mode)) .dir
|
||||
else if (stat.nlink > 1) .link
|
||||
else if (!std.c.S.ISREG(stat.mode)) .nonreg
|
||||
else .reg,
|
||||
.blocks = clamp(sink.Stat, .blocks, stat.blocks),
|
||||
.size = clamp(sink.Stat, .size, stat.size),
|
||||
.dev = truncate(sink.Stat, .dev, stat.dev),
|
||||
.ino = truncate(sink.Stat, .ino, stat.ino),
|
||||
.nlink = clamp(sink.Stat, .nlink, stat.nlink),
|
||||
.ext = .{
|
||||
.pack = .{
|
||||
.hasmtime = true,
|
||||
.hasuid = true,
|
||||
.hasgid = true,
|
||||
.hasmode = true,
|
||||
},
|
||||
.mtime = clamp(model.Ext, .mtime, stat.mtime().sec),
|
||||
.uid = truncate(model.Ext, .uid, stat.uid),
|
||||
.gid = truncate(model.Ext, .gid, stat.gid),
|
||||
.mode = truncate(model.Ext, .mode, stat.mode),
|
||||
},
|
||||
};
|
||||
}
|
||||
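// [Editorial sketch, not part of the original change] statAt() can also be
// called on its own; pass null for 'symlink' when the caller does not care
// whether the entry was a symlink. The file name is illustrative only:
fn statExampleSketch() !sink.Stat {
    return statAt(std.fs.cwd(), "some-file", false, null);
}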
|
||||
|
||||
fn isCacheDir(dir: std.fs.Dir) bool {
|
||||
const sig = "Signature: 8a477f597d28d172789f06886806bc55";
|
||||
const f = dir.openFileZ("CACHEDIR.TAG", .{}) catch return false;
|
||||
defer f.close();
|
||||
var buf: [sig.len]u8 = undefined;
|
||||
const len = f.readAll(&buf) catch return false;
|
||||
return len == sig.len and std.mem.eql(u8, &buf, sig);
|
||||
}
|
||||
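// [Editorial sketch, not part of the original change] A directory opts out of
// scanning (when the exclude_caches option is enabled) by containing a
// CACHEDIR.TAG file that starts with the signature checked above. Creating
// such a tag file could look like this:
fn writeCacheTagSketch(dir: std.fs.Dir) !void {
    const f = try dir.createFile("CACHEDIR.TAG", .{});
    defer f.close();
    try f.writeAll("Signature: 8a477f597d28d172789f06886806bc55\n");
}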
|
||||
|
||||
const State = struct {
|
||||
// Simple LIFO queue. Threads attempt to fully scan their assigned
|
||||
// directory before consulting this queue for their next task, so there
|
||||
// shouldn't be too much contention here.
|
||||
// TODO: unless threads keep juggling around leaf nodes, need to measure
|
||||
// actual use.
|
||||
// There's no real reason for this to be LIFO other than that that was the
|
||||
// easiest to implement. Queue order has an effect on scheduling, but it's
|
||||
// impossible for me to predict how that ends up affecting performance.
|
||||
queue: [QUEUE_SIZE]*Dir = undefined,
|
||||
queue_len: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
|
||||
queue_lock: std.Thread.Mutex = .{},
|
||||
queue_cond: std.Thread.Condition = .{},
|
||||
|
||||
threads: []Thread,
|
||||
waiting: usize = 0,
|
||||
|
||||
// No clue what this should be set to. Dir structs aren't small so we don't
|
||||
// want to have too many of them.
|
||||
const QUEUE_SIZE = 16;
|
||||
|
||||
// Returns true if the given Dir has been queued, false if the queue is full.
|
||||
fn tryPush(self: *State, d: *Dir) bool {
|
||||
if (self.queue_len.load(.acquire) == QUEUE_SIZE) return false;
|
||||
{
|
||||
self.queue_lock.lock();
|
||||
defer self.queue_lock.unlock();
|
||||
if (self.queue_len.load(.monotonic) == QUEUE_SIZE) return false;
|
||||
const slot = self.queue_len.fetchAdd(1, .monotonic);
|
||||
self.queue[slot] = d;
|
||||
}
|
||||
self.queue_cond.signal();
|
||||
return true;
|
||||
}
|
||||
|
||||
// Blocks while the queue is empty, returns null when all threads are blocking.
|
||||
fn waitPop(self: *State) ?*Dir {
|
||||
self.queue_lock.lock();
|
||||
defer self.queue_lock.unlock();
|
||||
|
||||
self.waiting += 1;
|
||||
while (self.queue_len.load(.monotonic) == 0) {
|
||||
if (self.waiting == self.threads.len) {
|
||||
self.queue_cond.broadcast();
|
||||
return null;
|
||||
}
|
||||
self.queue_cond.wait(&self.queue_lock);
|
||||
}
|
||||
self.waiting -= 1;
|
||||
|
||||
const slot = self.queue_len.fetchSub(1, .monotonic) - 1;
|
||||
defer self.queue[slot] = undefined;
|
||||
return self.queue[slot];
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
const Dir = struct {
|
||||
fd: std.fs.Dir,
|
||||
dev: u64,
|
||||
pat: exclude.Patterns,
|
||||
it: std.fs.Dir.Iterator,
|
||||
sink: *sink.Dir,
|
||||
|
||||
fn create(fd: std.fs.Dir, dev: u64, pat: exclude.Patterns, s: *sink.Dir) *Dir {
|
||||
const d = main.allocator.create(Dir) catch unreachable;
|
||||
d.* = .{
|
||||
.fd = fd,
|
||||
.dev = dev,
|
||||
.pat = pat,
|
||||
.sink = s,
|
||||
.it = fd.iterate(),
|
||||
};
|
||||
return d;
|
||||
}
|
||||
|
||||
fn destroy(d: *Dir, t: *Thread) void {
|
||||
d.pat.deinit();
|
||||
d.fd.close();
|
||||
d.sink.unref(t.sink);
|
||||
main.allocator.destroy(d);
|
||||
}
|
||||
};
|
||||
|
||||
const Thread = struct {
|
||||
thread_num: usize,
|
||||
sink: *sink.Thread,
|
||||
state: *State,
|
||||
stack: std.ArrayListUnmanaged(*Dir) = .empty,
|
||||
thread: std.Thread = undefined,
|
||||
namebuf: [4096]u8 = undefined,
|
||||
|
||||
fn scanOne(t: *Thread, dir: *Dir, name_: []const u8) void {
|
||||
if (name_.len > t.namebuf.len - 1) {
|
||||
dir.sink.addSpecial(t.sink, name_, .err);
|
||||
return;
|
||||
}
|
||||
|
||||
@memcpy(t.namebuf[0..name_.len], name_);
|
||||
t.namebuf[name_.len] = 0;
|
||||
const name = t.namebuf[0..name_.len:0];
|
||||
|
||||
const excluded = dir.pat.match(name);
|
||||
if (excluded == false) { // matched either a file or directory, so we can exclude this before stat()ing.
|
||||
dir.sink.addSpecial(t.sink, name, .pattern);
|
||||
return;
|
||||
}
|
||||
|
||||
var symlink: bool = undefined;
|
||||
var stat = statAt(dir.fd, name, false, &symlink) catch {
|
||||
dir.sink.addSpecial(t.sink, name, .err);
|
||||
return;
|
||||
};
|
||||
|
||||
if (main.config.follow_symlinks and symlink) {
|
||||
if (statAt(dir.fd, name, true, &symlink)) |nstat| {
|
||||
if (nstat.etype != .dir) {
|
||||
stat = nstat;
|
||||
// Symlink targets may reside on different filesystems,
|
||||
// this will break hardlink detection and counting so let's disable it.
|
||||
if (stat.etype == .link and stat.dev != dir.dev) {
|
||||
stat.etype = .reg;
|
||||
stat.nlink = 1;
|
||||
}
|
||||
}
|
||||
} else |_| {}
|
||||
}
|
||||
|
||||
if (main.config.same_fs and stat.dev != dir.dev) {
|
||||
dir.sink.addSpecial(t.sink, name, .otherfs);
|
||||
return;
|
||||
}
|
||||
|
||||
if (stat.etype != .dir) {
|
||||
dir.sink.addStat(t.sink, name, &stat);
|
||||
return;
|
||||
}
|
||||
|
||||
if (excluded == true) {
|
||||
dir.sink.addSpecial(t.sink, name, .pattern);
|
||||
return;
|
||||
}
|
||||
|
||||
var edir = dir.fd.openDirZ(name, .{ .no_follow = true, .iterate = true }) catch {
|
||||
const s = dir.sink.addDir(t.sink, name, &stat);
|
||||
s.setReadError(t.sink);
|
||||
s.unref(t.sink);
|
||||
return;
|
||||
};
|
||||
|
||||
if (@import("builtin").os.tag == .linux
|
||||
and main.config.exclude_kernfs
|
||||
and stat.dev != dir.dev
|
||||
and isKernfs(edir)
|
||||
) {
|
||||
edir.close();
|
||||
dir.sink.addSpecial(t.sink, name, .kernfs);
|
||||
return;
|
||||
}
|
||||
|
||||
if (main.config.exclude_caches and isCacheDir(edir)) {
|
||||
dir.sink.addSpecial(t.sink, name, .pattern);
|
||||
edir.close();
|
||||
return;
|
||||
}
|
||||
|
||||
const s = dir.sink.addDir(t.sink, name, &stat);
|
||||
const ndir = Dir.create(edir, stat.dev, dir.pat.enter(name), s);
|
||||
if (main.config.threads == 1 or !t.state.tryPush(ndir))
|
||||
t.stack.append(main.allocator, ndir) catch unreachable;
|
||||
}
|
||||
|
||||
fn run(t: *Thread) void {
|
||||
defer t.stack.deinit(main.allocator);
|
||||
while (t.state.waitPop()) |dir| {
|
||||
t.stack.append(main.allocator, dir) catch unreachable;
|
||||
|
||||
while (t.stack.items.len > 0) {
|
||||
const d = t.stack.items[t.stack.items.len - 1];
|
||||
|
||||
t.sink.setDir(d.sink);
|
||||
if (t.thread_num == 0) main.handleEvent(false, false);
|
||||
|
||||
const entry = d.it.next() catch blk: {
|
||||
dir.sink.setReadError(t.sink);
|
||||
break :blk null;
|
||||
};
|
||||
if (entry) |e| t.scanOne(d, e.name)
|
||||
else {
|
||||
t.sink.setDir(null);
|
||||
t.stack.pop().?.destroy(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
pub fn scan(path: [:0]const u8) !void {
|
||||
const sink_threads = sink.createThreads(main.config.threads);
|
||||
defer sink.done();
|
||||
|
||||
var symlink: bool = undefined;
|
||||
const stat = try statAt(std.fs.cwd(), path, true, &symlink);
|
||||
const fd = try std.fs.cwd().openDirZ(path, .{ .iterate = true });
|
||||
|
||||
var state = State{
|
||||
.threads = main.allocator.alloc(Thread, main.config.threads) catch unreachable,
|
||||
};
|
||||
defer main.allocator.free(state.threads);
|
||||
|
||||
const root = sink.createRoot(path, &stat);
|
||||
const dir = Dir.create(fd, stat.dev, exclude.getPatterns(path), root);
|
||||
_ = state.tryPush(dir);
|
||||
|
||||
for (sink_threads, state.threads, 0..) |*s, *t, n|
|
||||
t.* = .{ .sink = s, .state = &state, .thread_num = n };
|
||||
|
||||
// XXX: Continue with fewer threads on error?
|
||||
for (state.threads[1..]) |*t| {
|
||||
t.thread = std.Thread.spawn(
|
||||
.{ .stack_size = 128 * 1024, .allocator = main.allocator }, Thread.run, .{t}
|
||||
) catch |e| ui.die("Error spawning thread: {}\n", .{e});
|
||||
}
|
||||
state.threads[0].run();
|
||||
for (state.threads[1..]) |*t| t.thread.join();
|
||||
}
|
||||
245  src/settings.c
@@ -1,245 +0,0 @@
/* ncdu - NCurses Disk Usage
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
#include "ncdu.h"
|
||||
|
||||
|
||||
int settingsCli(int argc, char **argv) {
|
||||
int i, j;
|
||||
char gotdir = 0;
|
||||
|
||||
/* load defaults */
|
||||
memset(sdir, 0, PATH_MAX);
|
||||
getcwd(sdir, PATH_MAX);
|
||||
sflags = 0;
|
||||
sdelay = 100;
|
||||
bflags = BF_SIZE | BF_DESC;
|
||||
|
||||
/* read from commandline */
|
||||
for(i=1; i<argc; i++) {
|
||||
if(argv[i][0] == '-') {
|
||||
if(argv[i][1] == 'X' || strcmp(argv[i], "--exclude-from") == 0 || strcmp(argv[i], "--exclude") == 0) {
|
||||
if(i+1 >= argc) {
|
||||
printf("Option %s requires an argument\n", argv[i]);
|
||||
exit(1);
|
||||
}
|
||||
if(strcmp(argv[i], "--exclude") == 0)
|
||||
addExclude(argv[++i]);
|
||||
else if(addExcludeFile(argv[++i])) {
|
||||
printf("Can't open %s: %s\n", argv[i], strerror(errno));
|
||||
exit(1);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
for(j=1; j < strlen(argv[i]); j++)
|
||||
switch(argv[i][j]) {
|
||||
case 'a': sflags |= SF_AS; break;
|
||||
case 'x': sflags |= SF_SMFS; break;
|
||||
case 'q': sdelay = 2000; break;
|
||||
case '?':
|
||||
case 'h':
|
||||
printf("ncdu [-ahvx] [dir]\n\n");
|
||||
printf(" -a Apparent sizes\n");
|
||||
printf(" -h This help message\n");
|
||||
printf(" -q x Set the refresh interval in seconds\n");
|
||||
printf(" -v Print version\n");
|
||||
printf(" -x Same filesystem\n");
|
||||
exit(0);
|
||||
case 'v':
|
||||
printf("ncdu %s\n", PACKAGE_VERSION);
|
||||
exit(0);
|
||||
default:
|
||||
printf("Unknown option: -%c\n", argv[i][j]);
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
strcpy(sdir, argv[i]);
|
||||
gotdir = 1;
|
||||
}
|
||||
}
|
||||
return(gotdir ? 0 : 1);
|
||||
}
|
||||
|
||||
int settingsGet(void) {
|
||||
WINDOW *set;
|
||||
FORM *setf;
|
||||
FIELD *fields[11];
|
||||
int w, h, cx, cy, i, j, ch;
|
||||
int fw, fh, fy, fx, fnrow, fnbuf;
|
||||
char tmp[10], *buf = "", rst = 0;
|
||||
erase();
|
||||
refresh();
|
||||
/* h, w, y, x */
|
||||
fields[0] = new_field(1, 10, 0, 0, 0, 0);
|
||||
fields[1] = new_field(1, 43, 0, 11, 0, 0);
|
||||
fields[2] = new_field(1, 16, 1, 11, 0, 0);
|
||||
fields[3] = new_field(1, 1, 1, 27, 0, 0);
|
||||
fields[4] = new_field(1, 1, 1, 28, 0, 0);
|
||||
fields[5] = new_field(1, 16, 2, 12, 0, 0);
|
||||
fields[6] = new_field(1, 1, 2, 27, 0, 0);
|
||||
fields[7] = new_field(1, 1, 2, 28, 0, 0);
|
||||
fields[8] = new_field(1, 6, 3, 11, 0, 0);
|
||||
fields[9] = new_field(1, 9, 3, 19, 0, 0);
|
||||
fields[10] = NULL;
|
||||
|
||||
/* Directory */
|
||||
field_opts_off(fields[0], O_ACTIVE);
|
||||
set_field_buffer(fields[0], 0, "Directory:");
|
||||
set_field_back(fields[1], A_UNDERLINE);
|
||||
field_opts_off(fields[1], O_STATIC);
|
||||
field_opts_off(fields[1], O_AUTOSKIP);
|
||||
set_max_field(fields[1], PATH_MAX);
|
||||
set_field_buffer(fields[1], 0, sdir);
|
||||
/* One filesystem */
|
||||
field_opts_off(fields[2], O_ACTIVE);
|
||||
set_field_buffer(fields[2], 0, "One filesystem [");
|
||||
field_opts_off(fields[3], O_AUTOSKIP);
|
||||
set_field_back(fields[3], A_UNDERLINE);
|
||||
set_field_buffer(fields[3], 0, sflags & SF_SMFS ? "X" : " ");
|
||||
field_opts_off(fields[4], O_ACTIVE);
|
||||
set_field_buffer(fields[4], 0, "]");
|
||||
/* Apparent sizes */
|
||||
field_opts_off(fields[5], O_ACTIVE);
|
||||
set_field_buffer(fields[5], 0, "Apparent size [");
|
||||
field_opts_off(fields[6], O_AUTOSKIP);
|
||||
set_field_back(fields[6], A_UNDERLINE);
|
||||
set_field_buffer(fields[6], 0, sflags & SF_AS ? "X" : " ");
|
||||
field_opts_off(fields[7], O_ACTIVE);
|
||||
set_field_buffer(fields[7], 0, "]");
|
||||
/* buttons */
|
||||
set_field_buffer(fields[8], 0, "[OK]");
|
||||
set_field_buffer(fields[9], 0, "[CLOSE]");
|
||||
|
||||
setf = new_form(fields);
|
||||
h=8;w=60;
|
||||
|
||||
set = newwin(h, w, winrows/2 - h/2, wincols/2 - w/2);
|
||||
keypad(stdscr, TRUE);
|
||||
keypad(set, TRUE);
|
||||
box(set, 0, 0);
|
||||
curs_set(1);
|
||||
|
||||
set_form_win(setf, set);
|
||||
set_form_sub(setf, derwin(set, h-3, w-4, 2, 2));
|
||||
|
||||
wattron(set, A_BOLD);
|
||||
mvwaddstr(set, 0, 4, "Calculate disk space usage...");
|
||||
wattroff(set, A_BOLD);
|
||||
post_form(setf);
|
||||
refresh();
|
||||
wrefresh(set);
|
||||
|
||||
while((ch = wgetch(set))) {
|
||||
getyx(set, cy, cx);
|
||||
cy-=2; cx-=2;
|
||||
for(i=field_count(setf); --i>=0; ) {
|
||||
field_info(fields[i], &fh, &fw, &fy, &fx, &fnrow, &fnbuf);
|
||||
if(cy >= fy && cy < fy+fh && cx >= fx && cx < fx+fw) {
|
||||
buf = field_buffer(fields[i], 0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
switch(ch) {
|
||||
case KEY_BACKSPACE:
|
||||
case 127: form_driver(setf, REQ_DEL_PREV); break;
|
||||
case KEY_LL:
|
||||
case KEY_END: form_driver(setf, REQ_END_LINE); break;
|
||||
case KEY_HOME: form_driver(setf, REQ_BEG_LINE); break;
|
||||
case KEY_LEFT: form_driver(setf, REQ_LEFT_CHAR); break;
|
||||
case KEY_RIGHT:
|
||||
if(i == 1) {
|
||||
for(j=strlen(buf);--j>i;)
|
||||
if(buf[j] != ' ')
|
||||
break;
|
||||
if(j < fw && cx > fx+j)
|
||||
break;
|
||||
}
|
||||
form_driver(setf, REQ_RIGHT_CHAR);
|
||||
break;
|
||||
case KEY_DC: form_driver(setf, REQ_DEL_CHAR); break;
|
||||
case KEY_DOWN: form_driver(setf, REQ_NEXT_FIELD); break;
|
||||
case KEY_UP: form_driver(setf, REQ_PREV_FIELD); break;
|
||||
case '\t': form_driver(setf, REQ_NEXT_FIELD); break;
|
||||
case KEY_RESIZE: rst = 1; goto setend; break;
|
||||
default:
|
||||
if(i == 9) {
|
||||
rst = 2;
|
||||
goto setend;
|
||||
}
|
||||
if(i == 8 || ch == '\n')
|
||||
goto setend;
|
||||
if(i == 3 || i == 6)
|
||||
set_field_buffer(fields[i], 0, buf[0] == ' ' ? "X" : " ");
|
||||
else if(!isprint(ch)) break;
|
||||
else if(i == 9) {
|
||||
if(!isdigit(ch)) strcpy(tmp, " 0");
|
||||
else if(buf[0] != ' ' || buf[1] == ' ' || buf[1] == '0') sprintf(tmp, " %c", ch);
|
||||
else sprintf(tmp, "%c%c", buf[1], ch);
|
||||
set_field_buffer(fields[i], 0, tmp);
|
||||
} else
|
||||
form_driver(setf, ch);
|
||||
break;
|
||||
}
|
||||
wrefresh(set);
|
||||
}
|
||||
setend:
|
||||
/* !!!WARNING!!! ugly hack !!!WARNING!!! */
|
||||
set_current_field(setf, fields[1]);
|
||||
form_driver(setf, REQ_END_LINE);
|
||||
for(i=0; i<40; i++)
|
||||
form_driver(setf, ' ');
|
||||
dynamic_field_info(fields[1], &fh, &fw, &fx);
|
||||
memcpy(sdir, field_buffer(fields[1], 0), fw);
|
||||
for(i=strlen(sdir); --i>=0;)
|
||||
if(sdir[i] != ' ' && (sdir[i] != '/' || i == 0)) {
|
||||
sdir[i+1] = 0;
|
||||
break;
|
||||
}
|
||||
/* EOW */
|
||||
sflags = sflags & SF_IGNS;
|
||||
buf = field_buffer(fields[3], 0);
|
||||
if(buf[0] != ' ') sflags |= SF_SMFS;
|
||||
buf = field_buffer(fields[6], 0);
|
||||
if(buf[0] != ' ') sflags |= SF_AS;
|
||||
|
||||
unpost_form(setf);
|
||||
for(i=10;--i>=0;)
|
||||
free_field(fields[i]);
|
||||
werase(set);
|
||||
delwin(set);
|
||||
erase();
|
||||
refresh();
|
||||
curs_set(0);
|
||||
return(rst);
|
||||
}
|
||||
|
||||
int settingsWin(void) {
|
||||
int r;
|
||||
while((r = settingsGet()) == 1) {
|
||||
ncresize();
|
||||
return(settingsWin());
|
||||
}
|
||||
return(r);
|
||||
}
|
||||
498  src/sink.zig  Normal file
@@ -0,0 +1,498 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const model = @import("model.zig");
|
||||
const mem_src = @import("mem_src.zig");
|
||||
const mem_sink = @import("mem_sink.zig");
|
||||
const json_export = @import("json_export.zig");
|
||||
const bin_export = @import("bin_export.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const util = @import("util.zig");
|
||||
|
||||
// Terminology note:
|
||||
// "source" is where scan results come from, these are scan.zig, mem_src.zig
|
||||
// and json_import.zig.
|
||||
// "sink" is where scan results go to. This file provides a generic sink API
|
||||
// for sources to use. The API forwards the results to specific sink
|
||||
// implementations (mem_sink.zig or json_export.zig) and provides progress
|
||||
// updates.
|
||||
|
||||
// API for sources:
|
||||
//
|
||||
// Single-threaded:
|
||||
//
|
||||
// createThreads(1)
|
||||
// dir = createRoot(name, stat)
|
||||
// dir.addSpecial(name, opt)
|
||||
// dir.addFile(name, stat)
|
||||
// sub = dir.addDir(name, stat)
|
||||
// (no dir.stuff here)
|
||||
// sub.addstuff();
|
||||
// sub.unref();
|
||||
// dir.unref();
|
||||
// done()
|
||||
//
|
||||
// Multi-threaded interleaving:
|
||||
//
|
||||
// createThreads(n)
|
||||
// dir = createRoot(name, stat)
|
||||
// dir.addSpecial(name, opt)
|
||||
// dir.addFile(name, stat)
|
||||
// sub = dir.addDir(...)
|
||||
// sub.addstuff();
|
||||
// sub2 = dir.addDir(..);
|
||||
// sub.unref();
|
||||
// dir.unref(); // <- no more direct descendants for 'dir', but subdirs could still be active
|
||||
// sub2.addStuff();
|
||||
// sub2.unref(); // <- this is where 'dir' is really done.
|
||||
// done()
|
||||
//
|
||||
// Rule:
|
||||
// No concurrent method calls on a single Dir object, but objects may be passed between threads.
|
||||
|
||||
|
||||
// Concise stat struct for fields we're interested in, with the types used by the model.
|
||||
pub const Stat = struct {
|
||||
etype: model.EType = .reg,
|
||||
blocks: model.Blocks = 0,
|
||||
size: u64 = 0,
|
||||
dev: u64 = 0,
|
||||
ino: u64 = 0,
|
||||
nlink: u31 = 0,
|
||||
ext: model.Ext = .{},
|
||||
};
|
||||
|
||||
|
||||
pub const Dir = struct {
|
||||
refcnt: std.atomic.Value(usize) = std.atomic.Value(usize).init(1),
|
||||
name: []const u8,
|
||||
parent: ?*Dir,
|
||||
out: Out,
|
||||
|
||||
const Out = union(enum) {
|
||||
mem: mem_sink.Dir,
|
||||
json: json_export.Dir,
|
||||
bin: bin_export.Dir,
|
||||
};
|
||||
|
||||
pub fn addSpecial(d: *Dir, t: *Thread, name: []const u8, sp: model.EType) void {
|
||||
std.debug.assert(@intFromEnum(sp) < 0); // >=0 aren't "special"
|
||||
_ = t.files_seen.fetchAdd(1, .monotonic);
|
||||
switch (d.out) {
|
||||
.mem => |*m| m.addSpecial(&t.sink.mem, name, sp),
|
||||
.json => |*j| j.addSpecial(name, sp),
|
||||
.bin => |*b| b.addSpecial(&t.sink.bin, name, sp),
|
||||
}
|
||||
if (sp == .err) {
|
||||
global.last_error_lock.lock();
|
||||
defer global.last_error_lock.unlock();
|
||||
if (global.last_error) |p| main.allocator.free(p);
|
||||
const p = d.path();
|
||||
global.last_error = std.fs.path.joinZ(main.allocator, &.{ p, name }) catch unreachable;
|
||||
main.allocator.free(p);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn addStat(d: *Dir, t: *Thread, name: []const u8, stat: *const Stat) void {
|
||||
_ = t.files_seen.fetchAdd(1, .monotonic);
|
||||
_ = t.addBytes((stat.blocks *| 512) / @max(1, stat.nlink));
|
||||
std.debug.assert(stat.etype != .dir);
|
||||
switch (d.out) {
|
||||
.mem => |*m| _ = m.addStat(&t.sink.mem, name, stat),
|
||||
.json => |*j| j.addStat(name, stat),
|
||||
.bin => |*b| b.addStat(&t.sink.bin, name, stat),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn addDir(d: *Dir, t: *Thread, name: []const u8, stat: *const Stat) *Dir {
|
||||
_ = t.files_seen.fetchAdd(1, .monotonic);
|
||||
_ = t.addBytes(stat.blocks *| 512);
|
||||
std.debug.assert(stat.etype == .dir);
|
||||
std.debug.assert(d.out != .json or d.refcnt.load(.monotonic) == 1);
|
||||
|
||||
const s = main.allocator.create(Dir) catch unreachable;
|
||||
s.* = .{
|
||||
.name = main.allocator.dupe(u8, name) catch unreachable,
|
||||
.parent = d,
|
||||
.out = switch (d.out) {
|
||||
.mem => |*m| .{ .mem = m.addDir(&t.sink.mem, name, stat) },
|
||||
.json => |*j| .{ .json = j.addDir(name, stat) },
|
||||
.bin => |*b| .{ .bin = b.addDir(stat) },
|
||||
},
|
||||
};
|
||||
d.ref();
|
||||
return s;
|
||||
}
|
||||
|
||||
pub fn setReadError(d: *Dir, t: *Thread) void {
|
||||
_ = t;
|
||||
switch (d.out) {
|
||||
.mem => |*m| m.setReadError(),
|
||||
.json => |*j| j.setReadError(),
|
||||
.bin => |*b| b.setReadError(),
|
||||
}
|
||||
global.last_error_lock.lock();
|
||||
defer global.last_error_lock.unlock();
|
||||
if (global.last_error) |p| main.allocator.free(p);
|
||||
global.last_error = d.path();
|
||||
}
|
||||
|
||||
fn path(d: *Dir) [:0]u8 {
|
||||
var components: std.ArrayListUnmanaged([]const u8) = .empty;
|
||||
defer components.deinit(main.allocator);
|
||||
var it: ?*Dir = d;
|
||||
while (it) |e| : (it = e.parent) components.append(main.allocator, e.name) catch unreachable;
|
||||
|
||||
var out: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var i: usize = components.items.len-1;
|
||||
while (true) {
|
||||
if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/'))
|
||||
out.append(main.allocator, '/') catch unreachable;
|
||||
out.appendSlice(main.allocator, components.items[i]) catch unreachable;
|
||||
if (i == 0) break;
|
||||
i -= 1;
|
||||
}
|
||||
return out.toOwnedSliceSentinel(main.allocator, 0) catch unreachable;
|
||||
}
|
||||
|
||||
fn ref(d: *Dir) void {
|
||||
_ = d.refcnt.fetchAdd(1, .monotonic);
|
||||
}
|
||||
|
||||
pub fn unref(d: *Dir, t: *Thread) void {
|
||||
if (d.refcnt.fetchSub(1, .release) != 1) return;
|
||||
_ = d.refcnt.load(.acquire);
|
||||
|
||||
switch (d.out) {
|
||||
.mem => |*m| m.final(if (d.parent) |p| &p.out.mem else null),
|
||||
.json => |*j| j.final(),
|
||||
.bin => |*b| b.final(&t.sink.bin, d.name, if (d.parent) |p| &p.out.bin else null),
|
||||
}
|
||||
|
||||
if (d.parent) |p| p.unref(t);
|
||||
if (d.name.len > 0) main.allocator.free(d.name);
|
||||
main.allocator.destroy(d);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
pub const Thread = struct {
|
||||
current_dir: ?*Dir = null,
|
||||
lock: std.Thread.Mutex = .{},
|
||||
// On 32-bit architectures, bytes_seen is protected by the above mutex instead.
|
||||
bytes_seen: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
|
||||
files_seen: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),
|
||||
|
||||
sink: union {
|
||||
mem: mem_sink.Thread,
|
||||
json: void,
|
||||
bin: bin_export.Thread,
|
||||
} = .{.mem = .{}},
|
||||
|
||||
fn addBytes(t: *Thread, bytes: u64) void {
|
||||
if (@bitSizeOf(usize) >= 64) _ = t.bytes_seen.fetchAdd(bytes, .monotonic)
|
||||
else {
|
||||
t.lock.lock();
|
||||
defer t.lock.unlock();
|
||||
t.bytes_seen.raw += bytes;
|
||||
}
|
||||
}
|
||||
|
||||
fn getBytes(t: *Thread) u64 {
|
||||
if (@bitSizeOf(usize) >= 64) return t.bytes_seen.load(.monotonic)
|
||||
else {
|
||||
t.lock.lock();
|
||||
defer t.lock.unlock();
|
||||
return t.bytes_seen.raw;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn setDir(t: *Thread, d: ?*Dir) void {
|
||||
t.lock.lock();
|
||||
defer t.lock.unlock();
|
||||
t.current_dir = d;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
pub const global = struct {
|
||||
pub var state: enum { done, err, zeroing, hlcnt, running } = .running;
|
||||
pub var threads: []Thread = undefined;
|
||||
pub var sink: enum { json, mem, bin } = .mem;
|
||||
|
||||
pub var last_error: ?[:0]u8 = null;
|
||||
var last_error_lock = std.Thread.Mutex{};
|
||||
var need_confirm_quit = false;
|
||||
};
|
||||
|
||||
|
||||
// Must be the first thing to call from a source; initializes global state.
|
||||
pub fn createThreads(num: usize) []Thread {
|
||||
// JSON export does not support multiple threads, scan into memory first.
|
||||
if (global.sink == .json and num > 1) {
|
||||
global.sink = .mem;
|
||||
mem_sink.global.stats = false;
|
||||
}
|
||||
|
||||
global.state = .running;
|
||||
if (global.last_error) |p| main.allocator.free(p);
|
||||
global.last_error = null;
|
||||
global.threads = main.allocator.alloc(Thread, num) catch unreachable;
|
||||
for (global.threads) |*t| t.* = .{
|
||||
.sink = switch (global.sink) {
|
||||
.mem => .{ .mem = .{} },
|
||||
.json => .{ .json = {} },
|
||||
.bin => .{ .bin = .{} },
|
||||
},
|
||||
};
|
||||
return global.threads;
|
||||
}
|
||||
|
||||
|
||||
// Must be the last thing to call from a source.
|
||||
pub fn done() void {
|
||||
switch (global.sink) {
|
||||
.mem => mem_sink.done(),
|
||||
.json => json_export.done(),
|
||||
.bin => bin_export.done(global.threads),
|
||||
}
|
||||
global.state = .done;
|
||||
main.allocator.free(global.threads);
|
||||
|
||||
// We scanned into memory; now walk that in-memory tree to produce the JSON export.
|
||||
if (global.sink == .mem and !mem_sink.global.stats) {
|
||||
global.sink = .json;
|
||||
mem_src.run(model.root);
|
||||
}
|
||||
|
||||
// Clear the screen when done.
|
||||
if (main.config.scan_ui == .line) main.handleEvent(false, true);
|
||||
}
|
||||
|
||||
|
||||
pub fn createRoot(path: []const u8, stat: *const Stat) *Dir {
|
||||
const d = main.allocator.create(Dir) catch unreachable;
|
||||
d.* = .{
|
||||
.name = main.allocator.dupe(u8, path) catch unreachable,
|
||||
.parent = null,
|
||||
.out = switch (global.sink) {
|
||||
.mem => .{ .mem = mem_sink.createRoot(path, stat) },
|
||||
.json => .{ .json = json_export.createRoot(path, stat) },
|
||||
.bin => .{ .bin = bin_export.createRoot(stat, global.threads) },
|
||||
},
|
||||
};
|
||||
return d;
|
||||
}
|
||||
|
||||
|
||||
fn drawConsole() void {
|
||||
const st = struct {
|
||||
var ansi: ?bool = null;
|
||||
var lines_written: usize = 0;
|
||||
};
|
||||
const stderr = if (@hasDecl(std.io, "getStdErr")) std.io.getStdErr() else std.fs.File.stderr();
|
||||
const ansi = st.ansi orelse blk: {
|
||||
const t = stderr.supportsAnsiEscapeCodes();
|
||||
st.ansi = t;
|
||||
break :blk t;
|
||||
};
|
||||
|
||||
var buf: [4096]u8 = undefined;
|
||||
var strm = std.io.fixedBufferStream(buf[0..]);
|
||||
var wr = strm.writer();
|
||||
while (ansi and st.lines_written > 0) {
|
||||
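// "\x1b[1F" moves the cursor to the start of the previous line and "\x1b[2K"
// erases that whole line, so the lines written by the previous update are
// overwritten in place instead of scrolling the console.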
wr.writeAll("\x1b[1F\x1b[2K") catch {};
|
||||
st.lines_written -= 1;
|
||||
}
|
||||
|
||||
if (global.state == .hlcnt) {
|
||||
wr.writeAll("Counting hardlinks...") catch {};
|
||||
if (model.inodes.add_total > 0)
|
||||
wr.print(" {} / {}", .{ model.inodes.add_done, model.inodes.add_total }) catch {};
|
||||
wr.writeByte('\n') catch {};
|
||||
st.lines_written += 1;
|
||||
|
||||
} else if (global.state == .running) {
|
||||
var bytes: u64 = 0;
|
||||
var files: u64 = 0;
|
||||
for (global.threads) |*t| {
|
||||
bytes +|= t.getBytes();
|
||||
files += t.files_seen.load(.monotonic);
|
||||
}
|
||||
const r = ui.FmtSize.fmt(bytes);
|
||||
wr.print("{} files / {s}{s}\n", .{files, r.num(), r.unit}) catch {};
|
||||
st.lines_written += 1;
|
||||
|
||||
for (global.threads, 0..) |*t, i| {
|
||||
const dir = blk: {
|
||||
t.lock.lock();
|
||||
defer t.lock.unlock();
|
||||
break :blk if (t.current_dir) |d| d.path() else null;
|
||||
};
|
||||
wr.print(" #{}: {s}\n", .{i+1, ui.shorten(ui.toUtf8(dir orelse "(waiting)"), 73)}) catch {};
|
||||
st.lines_written += 1;
|
||||
if (dir) |p| main.allocator.free(p);
|
||||
}
|
||||
}
|
||||
|
||||
stderr.writeAll(strm.getWritten()) catch {};
|
||||
}
|
||||
|
||||
|
||||
fn drawProgress() void {
|
||||
const st = struct { var animation_pos: usize = 0; };
|
||||
|
||||
var bytes: u64 = 0;
|
||||
var files: u64 = 0;
|
||||
for (global.threads) |*t| {
|
||||
bytes +|= t.getBytes();
|
||||
files += t.files_seen.load(.monotonic);
|
||||
}
|
||||
|
||||
ui.init();
|
||||
const width = ui.cols -| 5;
|
||||
const numthreads: u32 = @intCast(@min(global.threads.len, @max(1, ui.rows -| 10)));
|
||||
const box = ui.Box.create(8 + numthreads, width, "Scanning...");
|
||||
box.move(2, 2);
|
||||
ui.addstr("Total items: ");
|
||||
ui.addnum(.default, files);
|
||||
|
||||
if (width > 48) {
|
||||
box.move(2, 30);
|
||||
ui.addstr("size: ");
|
||||
ui.addsize(.default, bytes);
|
||||
}
|
||||
|
||||
for (0..numthreads) |i| {
|
||||
box.move(3+@as(u32, @intCast(i)), 4);
|
||||
const dir = blk: {
|
||||
const t = &global.threads[i];
|
||||
t.lock.lock();
|
||||
defer t.lock.unlock();
|
||||
break :blk if (t.current_dir) |d| d.path() else null;
|
||||
};
|
||||
ui.addstr(ui.shorten(ui.toUtf8(dir orelse "(waiting)"), width -| 6));
|
||||
if (dir) |p| main.allocator.free(p);
|
||||
}
|
||||
|
||||
blk: {
|
||||
global.last_error_lock.lock();
|
||||
defer global.last_error_lock.unlock();
|
||||
const err = global.last_error orelse break :blk;
|
||||
box.move(4 + numthreads, 2);
|
||||
ui.style(.bold);
|
||||
ui.addstr("Warning: ");
|
||||
ui.style(.default);
|
||||
ui.addstr("error scanning ");
|
||||
ui.addstr(ui.shorten(ui.toUtf8(err), width -| 28));
|
||||
box.move(5 + numthreads, 3);
|
||||
ui.addstr("some directory sizes may not be correct.");
|
||||
}
|
||||
|
||||
if (global.need_confirm_quit) {
|
||||
box.move(6 + numthreads, width -| 20);
|
||||
ui.addstr("Press ");
|
||||
ui.style(.key);
|
||||
ui.addch('y');
|
||||
ui.style(.default);
|
||||
ui.addstr(" to confirm");
|
||||
} else {
|
||||
box.move(6 + numthreads, width -| 18);
|
||||
ui.addstr("Press ");
|
||||
ui.style(.key);
|
||||
ui.addch('q');
|
||||
ui.style(.default);
|
||||
ui.addstr(" to abort");
|
||||
}
|
||||
|
||||
if (main.config.update_delay < std.time.ns_per_s and width > 40) {
|
||||
const txt = "Scanning...";
|
||||
st.animation_pos += 1;
|
||||
if (st.animation_pos >= txt.len*2) st.animation_pos = 0;
|
||||
if (st.animation_pos < txt.len) {
|
||||
box.move(6 + numthreads, 2);
|
||||
for (txt[0..st.animation_pos + 1]) |t| ui.addch(t);
|
||||
} else {
|
||||
var i: u32 = txt.len-1;
|
||||
while (i > st.animation_pos-txt.len) : (i -= 1) {
|
||||
box.move(6 + numthreads, 2+i);
|
||||
ui.addch(txt[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fn drawError() void {
|
||||
const width = ui.cols -| 5;
|
||||
const box = ui.Box.create(6, width, "Scan error");
|
||||
|
||||
box.move(2, 2);
|
||||
ui.addstr("Unable to open directory:");
|
||||
box.move(3, 4);
|
||||
ui.addstr(ui.shorten(ui.toUtf8(global.last_error.?), width -| 10));
|
||||
|
||||
box.move(4, width -| 27);
|
||||
ui.addstr("Press any key to continue");
|
||||
}
|
||||
|
||||
|
||||
fn drawMessage(msg: []const u8) void {
|
||||
const width = ui.cols -| 5;
|
||||
const box = ui.Box.create(4, width, "Scan error");
|
||||
box.move(2, 2);
|
||||
ui.addstr(msg);
|
||||
}
|
||||
|
||||
|
||||
pub fn draw() void {
|
||||
switch (main.config.scan_ui.?) {
|
||||
.none => {},
|
||||
.line => drawConsole(),
|
||||
.full => {
|
||||
ui.init();
|
||||
switch (global.state) {
|
||||
.done => {},
|
||||
.err => drawError(),
|
||||
.zeroing => {
|
||||
const box = ui.Box.create(4, ui.cols -| 5, "Initializing");
|
||||
box.move(2, 2);
|
||||
ui.addstr("Clearing directory counts...");
|
||||
},
|
||||
.hlcnt => {
|
||||
const box = ui.Box.create(4, ui.cols -| 5, "Finalizing");
|
||||
box.move(2, 2);
|
||||
ui.addstr("Counting hardlinks... ");
|
||||
if (model.inodes.add_total > 0) {
|
||||
ui.addnum(.default, model.inodes.add_done);
|
||||
ui.addstr(" / ");
|
||||
ui.addnum(.default, model.inodes.add_total);
|
||||
}
|
||||
},
|
||||
.running => drawProgress(),
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub fn keyInput(ch: i32) void {
|
||||
switch (global.state) {
|
||||
.done => {},
|
||||
.err => main.state = .browse,
|
||||
.zeroing => {},
|
||||
.hlcnt => {},
|
||||
.running => {
|
||||
switch (ch) {
|
||||
'q' => {
|
||||
if (main.config.confirm_quit) global.need_confirm_quit = !global.need_confirm_quit
|
||||
else ui.quit();
|
||||
},
|
||||
'y', 'Y' => if (global.need_confirm_quit) ui.quit(),
|
||||
else => global.need_confirm_quit = false,
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
src/ui.zig (new file, 690 lines)
@@ -0,0 +1,690 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
// Ncurses wrappers and TUI helper functions.
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const util = @import("util.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
pub var inited: bool = false;
|
||||
pub var main_thread: std.Thread.Id = undefined;
|
||||
pub var oom_threads = std.atomic.Value(usize).init(0);
|
||||
|
||||
pub var rows: u32 = undefined;
|
||||
pub var cols: u32 = undefined;
|
||||
|
||||
pub fn die(comptime fmt: []const u8, args: anytype) noreturn {
|
||||
deinit();
|
||||
std.debug.print(fmt, args);
|
||||
std.process.exit(1);
|
||||
}
|
||||
|
||||
pub fn quit() noreturn {
|
||||
deinit();
|
||||
std.process.exit(0);
|
||||
}
|
||||
|
||||
const sleep = if (@hasDecl(std.time, "sleep")) std.time.sleep else std.Thread.sleep;
|
||||
|
||||
// Should be called when malloc fails. Will show a message to the user, wait
|
||||
// for a second and return to give it another try.
|
||||
// Glitch: this function may be called while we're in the process of drawing
|
||||
// the ncurses window, in which case the deinit/reinit will cause the already
|
||||
// drawn part to be discarded. A redraw will fix that, but that tends to only
|
||||
// happen after user input.
|
||||
// Also, init() and other ncurses-related functions may have hidden allocation,
|
||||
// no clue if ncurses will consistently report OOM, but we're not handling that
|
||||
// right now.
|
||||
pub fn oom() void {
|
||||
@branchHint(.cold);
|
||||
if (main_thread == std.Thread.getCurrentId()) {
|
||||
const haveui = inited;
|
||||
deinit();
|
||||
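// "\x1b7" saves the cursor position, "\x1b[J" clears from the cursor to the
// end of the screen and "\x1b8" restores the cursor, so the next thing written
// to the terminal simply overwrites this message.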
std.debug.print("\x1b7\x1b[JOut of memory, trying again in 1 second. Hit Ctrl-C to abort.\x1b8", .{});
|
||||
sleep(std.time.ns_per_s);
|
||||
if (haveui)
|
||||
init();
|
||||
} else {
|
||||
_ = oom_threads.fetchAdd(1, .monotonic);
|
||||
sleep(std.time.ns_per_s);
|
||||
_ = oom_threads.fetchSub(1, .monotonic);
|
||||
}
|
||||
}
|
||||
|
||||
// Dumb strerror() alternative for Zig file I/O, not complete.
|
||||
// (Would be nicer if Zig just exposed errno so I could call strerror() directly)
|
||||
pub fn errorString(e: anyerror) [:0]const u8 {
|
||||
return switch (e) {
|
||||
error.AccessDenied => "Access denied",
|
||||
error.DirNotEmpty => "Directory not empty",
|
||||
error.DiskQuota => "Disk quota exceeded",
|
||||
error.FileBusy => "File is busy",
|
||||
error.FileNotFound => "No such file or directory",
|
||||
error.FileSystem => "I/O error", // This one is shit, Zig uses this for both EIO and ELOOP in execve().
|
||||
error.FileTooBig => "File too big",
|
||||
error.InputOutput => "I/O error",
|
||||
error.InvalidExe => "Invalid executable",
|
||||
error.IsDir => "Is a directory",
|
||||
error.NameTooLong => "Filename too long",
|
||||
error.NoSpaceLeft => "No space left on device",
|
||||
error.NotDir => "Not a directory",
|
||||
error.OutOfMemory, error.SystemResources => "Out of memory",
|
||||
error.ProcessFdQuotaExceeded => "Process file descriptor limit exceeded",
|
||||
error.ReadOnlyFilesystem => "Read-only filesystem",
|
||||
error.SymlinkLoop => "Symlink loop",
|
||||
error.SystemFdQuotaExceeded => "System file descriptor limit exceeded",
|
||||
error.EndOfStream => "Unexpected end of file",
|
||||
else => @errorName(e),
|
||||
};
|
||||
}
|
||||
|
||||
var to_utf8_buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
|
||||
fn toUtf8BadChar(ch: u8) bool {
|
||||
return switch (ch) {
|
||||
0...0x1F, 0x7F => true,
|
||||
else => false
|
||||
};
|
||||
}
|
||||
|
||||
// Utility function to convert a string to valid (mostly) printable UTF-8.
|
||||
// Invalid codepoints will be encoded as '\x##' strings.
|
||||
// Returns the given string if it's already valid, otherwise points to an
|
||||
// internal buffer that will be invalidated on the next call.
|
||||
// (Doesn't check for non-printable Unicode characters)
|
||||
// (This program assumes that the console locale is UTF-8, but file names may not be)
|
||||
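// For example, the invalid byte sequence {'a', 0xFF, 'z'} is returned as the
// six-character string a\xFFz, while an already-valid name such as "déjà" is
// returned unchanged.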
pub fn toUtf8(in: [:0]const u8) [:0]const u8 {
|
||||
const hasBadChar = blk: {
|
||||
for (in) |ch| if (toUtf8BadChar(ch)) break :blk true;
|
||||
break :blk false;
|
||||
};
|
||||
if (!hasBadChar and std.unicode.utf8ValidateSlice(in)) return in;
|
||||
var i: usize = 0;
|
||||
to_utf8_buf.shrinkRetainingCapacity(0);
|
||||
while (i < in.len) {
|
||||
if (std.unicode.utf8ByteSequenceLength(in[i])) |cp_len| {
|
||||
if (!toUtf8BadChar(in[i]) and i + cp_len <= in.len) {
|
||||
if (std.unicode.utf8Decode(in[i .. i + cp_len])) |_| {
|
||||
to_utf8_buf.appendSlice(main.allocator, in[i .. i + cp_len]) catch unreachable;
|
||||
i += cp_len;
|
||||
continue;
|
||||
} else |_| {}
|
||||
}
|
||||
} else |_| {}
|
||||
to_utf8_buf.writer(main.allocator).print("\\x{X:0>2}", .{in[i]}) catch unreachable;
|
||||
i += 1;
|
||||
}
|
||||
return util.arrayListBufZ(&to_utf8_buf, main.allocator);
|
||||
}
|
||||
|
||||
var shorten_buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
|
||||
// Shorten the given string to fit in the given number of columns.
|
||||
// If the string is too long, only the prefix and suffix will be printed, with '...' in between.
|
||||
// Input is assumed to be valid UTF-8.
|
||||
// Return value points to the input string or to an internal buffer that is
|
||||
// invalidated on a subsequent call.
|
||||
pub fn shorten(in: [:0]const u8, max_width: u32) [:0] const u8 {
|
||||
if (max_width < 4) return "...";
|
||||
var total_width: u32 = 0;
|
||||
var prefix_width: u32 = 0;
|
||||
var prefix_end: u32 = 0;
|
||||
var prefix_done = false;
|
||||
var it = std.unicode.Utf8View.initUnchecked(in).iterator();
|
||||
while (it.nextCodepoint()) |cp| {
|
||||
// XXX: libc assumption: wchar_t is a Unicode point. True for most modern libcs?
|
||||
// (The "proper" way is to use mbtowc(), but I'd rather port the musl wcwidth implementation to Zig so that I *know* it'll be Unicode.
|
||||
// On the other hand, ncurses also use wcwidth() so that would cause duplicated code. Ugh)
|
||||
const cp_width_ = c.wcwidth(cp);
|
||||
const cp_width: u32 = @intCast(if (cp_width_ < 0) 0 else cp_width_);
|
||||
const cp_len = std.unicode.utf8CodepointSequenceLength(cp) catch unreachable;
|
||||
total_width += cp_width;
|
||||
if (!prefix_done and prefix_width + cp_width <= @divFloor(max_width-1, 2)-1) {
|
||||
prefix_width += cp_width;
|
||||
prefix_end += cp_len;
|
||||
} else
|
||||
prefix_done = true;
|
||||
}
|
||||
if (total_width <= max_width) return in;
|
||||
|
||||
shorten_buf.shrinkRetainingCapacity(0);
|
||||
shorten_buf.appendSlice(main.allocator, in[0..prefix_end]) catch unreachable;
|
||||
shorten_buf.appendSlice(main.allocator, "...") catch unreachable;
|
||||
|
||||
var start_width: u32 = prefix_width;
|
||||
var start_len: u32 = prefix_end;
|
||||
it = std.unicode.Utf8View.initUnchecked(in[prefix_end..]).iterator();
|
||||
while (it.nextCodepoint()) |cp| {
|
||||
const cp_width_ = c.wcwidth(cp);
|
||||
const cp_width: u32 = @intCast(if (cp_width_ < 0) 0 else cp_width_);
|
||||
const cp_len = std.unicode.utf8CodepointSequenceLength(cp) catch unreachable;
|
||||
start_width += cp_width;
|
||||
start_len += cp_len;
|
||||
if (total_width - start_width <= max_width - prefix_width - 3) {
|
||||
shorten_buf.appendSlice(main.allocator, in[start_len..]) catch unreachable;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return util.arrayListBufZ(&shorten_buf, main.allocator);
|
||||
}
|
||||
|
||||
fn shortenTest(in: [:0]const u8, max_width: u32, out: [:0]const u8) !void {
|
||||
try std.testing.expectEqualStrings(out, shorten(in, max_width));
|
||||
}
|
||||
|
||||
test "shorten" {
|
||||
_ = c.setlocale(c.LC_ALL, ""); // libc wcwidth() may not recognize Unicode without this
|
||||
const t = shortenTest;
|
||||
try t("abcde", 3, "...");
|
||||
try t("abcde", 5, "abcde");
|
||||
try t("abcde", 4, "...e");
|
||||
try t("abcdefgh", 6, "a...gh");
|
||||
try t("abcdefgh", 7, "ab...gh");
|
||||
try t("ABCDEFGH", 16, "ABCDEFGH");
|
||||
try t("ABCDEFGH", 7, "A...H");
|
||||
try t("ABCDEFGH", 8, "A...H");
|
||||
try t("ABCDEFGH", 9, "A...GH");
|
||||
try t("AaBCDEFGH", 8, "A...H"); // could optimize this, but w/e
|
||||
try t("ABCDEFGaH", 8, "A...aH");
|
||||
try t("ABCDEFGH", 15, "ABC...FGH");
|
||||
try t("❤︎a❤︎a❤︎a", 5, "❤︎...︎a"); // Variation selectors; not great, there's an additional U+FE0E before 'a'.
|
||||
try t("ą́ą́ą́ą́ą́ą́", 5, "ą́...̨́ą́"); // Combining marks, similarly bad.
|
||||
}
|
||||
|
||||
const StyleAttr = struct { fg: i16, bg: i16, attr: u32 };
|
||||
const StyleDef = struct {
|
||||
name: [:0]const u8,
|
||||
off: StyleAttr,
|
||||
dark: StyleAttr,
|
||||
darkbg: StyleAttr,
|
||||
fn style(self: *const @This()) StyleAttr {
|
||||
return switch (main.config.ui_color) {
|
||||
.off => self.off,
|
||||
.dark => self.dark,
|
||||
.darkbg => self.darkbg,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
const styles = [_]StyleDef{
|
||||
.{ .name = "default",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = 0 },
|
||||
.dark = .{ .fg = -1, .bg = -1, .attr = 0 },
|
||||
.darkbg = .{ .fg = c.COLOR_WHITE, .bg = c.COLOR_BLACK, .attr = 0 } },
|
||||
.{ .name = "bold",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_BOLD },
|
||||
.dark = .{ .fg = -1, .bg = -1, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_WHITE, .bg = c.COLOR_BLACK, .attr = c.A_BOLD } },
|
||||
.{ .name = "bold_hd",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_BOLD|c.A_REVERSE },
|
||||
.dark = .{ .fg = c.COLOR_BLACK, .bg = c.COLOR_CYAN, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_BLACK, .bg = c.COLOR_CYAN, .attr = c.A_BOLD } },
|
||||
.{ .name = "box_title",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_BOLD },
|
||||
.dark = .{ .fg = c.COLOR_BLUE, .bg = -1, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_BLUE, .bg = c.COLOR_BLACK, .attr = c.A_BOLD } },
|
||||
.{ .name = "hd", // header + footer
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_REVERSE },
|
||||
.dark = .{ .fg = c.COLOR_BLACK, .bg = c.COLOR_CYAN, .attr = 0 },
|
||||
.darkbg = .{ .fg = c.COLOR_BLACK, .bg = c.COLOR_CYAN, .attr = 0 } },
|
||||
.{ .name = "sel",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_REVERSE },
|
||||
.dark = .{ .fg = c.COLOR_WHITE, .bg = c.COLOR_GREEN, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_WHITE, .bg = c.COLOR_GREEN, .attr = c.A_BOLD } },
|
||||
.{ .name = "num",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = 0 },
|
||||
.dark = .{ .fg = c.COLOR_YELLOW, .bg = -1, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_YELLOW, .bg = c.COLOR_BLACK, .attr = c.A_BOLD } },
|
||||
.{ .name = "num_hd",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_REVERSE },
|
||||
.dark = .{ .fg = c.COLOR_YELLOW, .bg = c.COLOR_CYAN, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_YELLOW, .bg = c.COLOR_CYAN, .attr = c.A_BOLD } },
|
||||
.{ .name = "num_sel",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_REVERSE },
|
||||
.dark = .{ .fg = c.COLOR_YELLOW, .bg = c.COLOR_GREEN, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_YELLOW, .bg = c.COLOR_GREEN, .attr = c.A_BOLD } },
|
||||
.{ .name = "key",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_BOLD },
|
||||
.dark = .{ .fg = c.COLOR_YELLOW, .bg = -1, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_YELLOW, .bg = c.COLOR_BLACK, .attr = c.A_BOLD } },
|
||||
.{ .name = "key_hd",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_BOLD|c.A_REVERSE },
|
||||
.dark = .{ .fg = c.COLOR_YELLOW, .bg = c.COLOR_CYAN, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_YELLOW, .bg = c.COLOR_CYAN, .attr = c.A_BOLD } },
|
||||
.{ .name = "dir",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = 0 },
|
||||
.dark = .{ .fg = c.COLOR_BLUE, .bg = -1, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_BLUE, .bg = c.COLOR_BLACK, .attr = c.A_BOLD } },
|
||||
.{ .name = "dir_sel",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_REVERSE },
|
||||
.dark = .{ .fg = c.COLOR_BLUE, .bg = c.COLOR_GREEN, .attr = c.A_BOLD },
|
||||
.darkbg = .{ .fg = c.COLOR_BLUE, .bg = c.COLOR_GREEN, .attr = c.A_BOLD } },
|
||||
.{ .name = "flag",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = 0 },
|
||||
.dark = .{ .fg = c.COLOR_RED, .bg = -1, .attr = 0 },
|
||||
.darkbg = .{ .fg = c.COLOR_RED, .bg = c.COLOR_BLACK, .attr = 0 } },
|
||||
.{ .name = "flag_sel",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_REVERSE },
|
||||
.dark = .{ .fg = c.COLOR_RED, .bg = c.COLOR_GREEN, .attr = 0 },
|
||||
.darkbg = .{ .fg = c.COLOR_RED, .bg = c.COLOR_GREEN, .attr = 0 } },
|
||||
.{ .name = "graph",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = 0 },
|
||||
.dark = .{ .fg = c.COLOR_MAGENTA, .bg = -1, .attr = 0 },
|
||||
.darkbg = .{ .fg = c.COLOR_MAGENTA, .bg = c.COLOR_BLACK, .attr = 0 } },
|
||||
.{ .name = "graph_sel",
|
||||
.off = .{ .fg = -1, .bg = -1, .attr = c.A_REVERSE },
|
||||
.dark = .{ .fg = c.COLOR_MAGENTA, .bg = c.COLOR_GREEN, .attr = 0 },
|
||||
.darkbg = .{ .fg = c.COLOR_MAGENTA, .bg = c.COLOR_GREEN, .attr = 0 } },
|
||||
};
|
||||
|
||||
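// Generate an enum at comptime with one tag per entry of `styles` (default,
// bold, bold_hd, ...), so @intFromEnum() of a Style can index both the
// `styles` array and the matching ncurses color pair (registered as index+1 in
// init() below).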
pub const Style = lbl: {
|
||||
var fields: [styles.len]std.builtin.Type.EnumField = undefined;
|
||||
for (&fields, styles, 0..) |*field, s, i| {
|
||||
field.* = .{
|
||||
.name = s.name,
|
||||
.value = i,
|
||||
};
|
||||
}
|
||||
break :lbl @Type(.{
|
||||
.@"enum" = .{
|
||||
.tag_type = u8,
|
||||
.fields = &fields,
|
||||
.decls = &[_]std.builtin.Type.Declaration{},
|
||||
.is_exhaustive = true,
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
const ui = @This();
|
||||
|
||||
pub const Bg = enum {
|
||||
default, hd, sel,
|
||||
|
||||
// Set the style to the selected bg combined with the given fg.
|
||||
pub fn fg(self: @This(), s: Style) void {
|
||||
ui.style(switch (self) {
|
||||
.default => s,
|
||||
.hd =>
|
||||
switch (s) {
|
||||
.default => Style.hd,
|
||||
.key => Style.key_hd,
|
||||
.num => Style.num_hd,
|
||||
else => unreachable,
|
||||
},
|
||||
.sel =>
|
||||
switch (s) {
|
||||
.default => Style.sel,
|
||||
.num => Style.num_sel,
|
||||
.dir => Style.dir_sel,
|
||||
.flag => Style.flag_sel,
|
||||
.graph => Style.graph_sel,
|
||||
else => unreachable,
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
fn updateSize() void {
|
||||
// getmax[yx] macros are marked as "legacy", but Zig can't deal with the "proper" getmaxyx macro.
|
||||
rows = @intCast(c.getmaxy(c.stdscr));
|
||||
cols = @intCast(c.getmaxx(c.stdscr));
|
||||
}
|
||||
|
||||
fn clearScr() void {
|
||||
// Send a "clear from cursor to end of screen" instruction, to clear a
|
||||
// potential line left behind from scanning in -1 mode.
|
||||
std.debug.print("\x1b[J", .{});
|
||||
}
|
||||
|
||||
pub fn init() void {
|
||||
if (inited) return;
|
||||
clearScr();
|
||||
if (main.config.nc_tty) {
|
||||
const tty = c.fopen("/dev/tty", "r+");
|
||||
if (tty == null) die("Error opening /dev/tty: {s}.\n", .{ c.strerror(@intFromEnum(std.posix.errno(-1))) });
|
||||
const term = c.newterm(null, tty, tty);
|
||||
if (term == null) die("Error initializing ncurses.\n", .{});
|
||||
_ = c.set_term(term);
|
||||
} else {
|
||||
if (c.initscr() == null) die("Error initializing ncurses.\n", .{});
|
||||
}
|
||||
updateSize();
|
||||
_ = c.cbreak();
|
||||
_ = c.noecho();
|
||||
_ = c.curs_set(0);
|
||||
_ = c.keypad(c.stdscr, true);
|
||||
|
||||
_ = c.start_color();
|
||||
_ = c.use_default_colors();
|
||||
for (styles, 0..) |s, i| _ = c.init_pair(@as(i16, @intCast(i+1)), s.style().fg, s.style().bg);
|
||||
_ = c.bkgd(@intCast(c.COLOR_PAIR(@intFromEnum(Style.default)+1)));
|
||||
inited = true;
|
||||
}
|
||||
|
||||
pub fn deinit() void {
|
||||
if (!inited) {
|
||||
clearScr();
|
||||
return;
|
||||
}
|
||||
_ = c.erase();
|
||||
_ = c.refresh();
|
||||
_ = c.endwin();
|
||||
inited = false;
|
||||
}
|
||||
|
||||
pub fn style(s: Style) void {
|
||||
_ = c.attr_set(styles[@intFromEnum(s)].style().attr, @intFromEnum(s)+1, null);
|
||||
}
|
||||
|
||||
pub fn move(y: u32, x: u32) void {
|
||||
_ = c.move(@as(i32, @intCast(y)), @as(i32, @intCast(x)));
|
||||
}
|
||||
|
||||
// Wraps to the next line if the text overflows, not sure how to disable that.
|
||||
// (Well, addchstr() does that, but not entirely sure I want to go that way.
|
||||
// Does that even work with UTF-8? Or do I really need to go wchar madness?)
|
||||
pub fn addstr(s: [:0]const u8) void {
|
||||
_ = c.addstr(s.ptr);
|
||||
}
|
||||
|
||||
// Not to be used for strings that may end up >256 bytes.
|
||||
pub fn addprint(comptime fmt: []const u8, args: anytype) void {
|
||||
var buf: [256:0]u8 = undefined;
|
||||
const s = std.fmt.bufPrintZ(&buf, fmt, args) catch unreachable;
|
||||
addstr(s);
|
||||
}
|
||||
|
||||
pub fn addch(ch: c.chtype) void {
|
||||
_ = c.addch(ch);
|
||||
}
|
||||
|
||||
// Format an integer to a human-readable size string.
|
||||
// num() = "###.#"
|
||||
// unit = " XB" or " XiB"
|
||||
// Concatenated, these take 8 columns in SI mode or 9 otherwise.
|
||||
pub const FmtSize = struct {
|
||||
buf: [5:0]u8,
|
||||
unit: [:0]const u8,
|
||||
|
||||
fn init(u: [:0]const u8, n: u64, mul: u64, div: u64) FmtSize {
|
||||
return .{
|
||||
.unit = u,
|
||||
.buf = util.fmt5dec(@intCast( ((n*mul) +| (div / 2)) / div )),
|
||||
};
|
||||
}
|
||||
|
||||
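// init() computes the size in tenths of a unit, rounded to nearest:
// (n*mul + div/2) / div, which fmt5dec() then renders as "###.#". E.g. in SI
// mode fmt(1050) calls init(" kB", 1050, 1, 100): (1050 + 50) / 100 = 11
// tenths, printed as "1.1" with unit " kB" (see the "fmtsize" test below).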
pub fn fmt(v: u64) FmtSize {
|
||||
if (main.config.si) {
|
||||
if (v < 1000) { return FmtSize.init(" B", v, 10, 1); }
|
||||
else if (v < 999_950) { return FmtSize.init(" kB", v, 1, 100); }
|
||||
else if (v < 999_950_000) { return FmtSize.init(" MB", v, 1, 100_000); }
|
||||
else if (v < 999_950_000_000) { return FmtSize.init(" GB", v, 1, 100_000_000); }
|
||||
else if (v < 999_950_000_000_000) { return FmtSize.init(" TB", v, 1, 100_000_000_000); }
|
||||
else if (v < 999_950_000_000_000_000) { return FmtSize.init(" PB", v, 1, 100_000_000_000_000); }
|
||||
else { return FmtSize.init(" EB", v, 1, 100_000_000_000_000_000); }
|
||||
} else {
|
||||
// Cutoff values are obtained by calculating 999.949999999999999999999999 * div with an infinite-precision calculator.
|
||||
// (Admittedly, this precision is silly)
|
||||
if (v < 1000) { return FmtSize.init(" B", v, 10, 1); }
|
||||
else if (v < 1023949) { return FmtSize.init(" KiB", v, 10, 1<<10); }
|
||||
else if (v < 1048523572) { return FmtSize.init(" MiB", v, 10, 1<<20); }
|
||||
else if (v < 1073688136909) { return FmtSize.init(" GiB", v, 10, 1<<30); }
|
||||
else if (v < 1099456652194612) { return FmtSize.init(" TiB", v, 10, 1<<40); }
|
||||
else if (v < 1125843611847281869) { return FmtSize.init(" PiB", v, 10, 1<<50); }
|
||||
else { return FmtSize.init(" EiB", v, 1, (1<<60)/10); }
|
||||
}
|
||||
}
|
||||
|
||||
pub fn num(self: *const FmtSize) [:0]const u8 {
|
||||
return &self.buf;
|
||||
}
|
||||
|
||||
fn testEql(self: FmtSize, exp: []const u8) !void {
|
||||
var buf: [10]u8 = undefined;
|
||||
try std.testing.expectEqualStrings(exp, try std.fmt.bufPrint(&buf, "{s}{s}", .{ self.num(), self.unit }));
|
||||
}
|
||||
};
|
||||
|
||||
test "fmtsize" {
|
||||
main.config.si = true;
|
||||
try FmtSize.fmt( 0).testEql(" 0.0 B");
|
||||
try FmtSize.fmt( 999).testEql("999.0 B");
|
||||
try FmtSize.fmt( 1000).testEql(" 1.0 kB");
|
||||
try FmtSize.fmt( 1049).testEql(" 1.0 kB");
|
||||
try FmtSize.fmt( 1050).testEql(" 1.1 kB");
|
||||
try FmtSize.fmt( 999_899).testEql("999.9 kB");
|
||||
try FmtSize.fmt( 999_949).testEql("999.9 kB");
|
||||
try FmtSize.fmt( 999_950).testEql(" 1.0 MB");
|
||||
try FmtSize.fmt( 1000_000).testEql(" 1.0 MB");
|
||||
try FmtSize.fmt( 999_850_009).testEql("999.9 MB");
|
||||
try FmtSize.fmt( 999_899_999).testEql("999.9 MB");
|
||||
try FmtSize.fmt( 999_900_000).testEql("999.9 MB");
|
||||
try FmtSize.fmt( 999_949_999).testEql("999.9 MB");
|
||||
try FmtSize.fmt( 999_950_000).testEql(" 1.0 GB");
|
||||
try FmtSize.fmt( 999_999_999).testEql(" 1.0 GB");
|
||||
try FmtSize.fmt(std.math.maxInt(u64)).testEql(" 18.4 EB");
|
||||
|
||||
main.config.si = false;
|
||||
try FmtSize.fmt( 0).testEql(" 0.0 B");
|
||||
try FmtSize.fmt( 999).testEql("999.0 B");
|
||||
try FmtSize.fmt( 1000).testEql(" 1.0 KiB");
|
||||
try FmtSize.fmt( 1024).testEql(" 1.0 KiB");
|
||||
try FmtSize.fmt( 102400).testEql("100.0 KiB");
|
||||
try FmtSize.fmt( 1023898).testEql("999.9 KiB");
|
||||
try FmtSize.fmt( 1023949).testEql(" 1.0 MiB");
|
||||
try FmtSize.fmt( 1048523571).testEql("999.9 MiB");
|
||||
try FmtSize.fmt( 1048523572).testEql(" 1.0 GiB");
|
||||
try FmtSize.fmt( 1073688136908).testEql("999.9 GiB");
|
||||
try FmtSize.fmt( 1073688136909).testEql(" 1.0 TiB");
|
||||
try FmtSize.fmt( 1099456652194611).testEql("999.9 TiB");
|
||||
try FmtSize.fmt( 1099456652194612).testEql(" 1.0 PiB");
|
||||
try FmtSize.fmt(1125843611847281868).testEql("999.9 PiB");
|
||||
try FmtSize.fmt(1125843611847281869).testEql(" 1.0 EiB");
|
||||
try FmtSize.fmt(std.math.maxInt(u64)).testEql(" 16.0 EiB");
|
||||
}
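// Illustrative use outside of the ncurses helpers below (a sketch, not part of
// the patch; assumes main.config.si has already been set):
//
//     const r = FmtSize.fmt(1536);
//     std.debug.print("{s}{s}\n", .{ r.num(), r.unit }); // "  1.5 KiB" with si=false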
|
||||
|
||||
// Print a formatted human-readable size string onto the given background.
|
||||
pub fn addsize(bg: Bg, v: u64) void {
|
||||
const r = FmtSize.fmt(v);
|
||||
bg.fg(.num);
|
||||
addstr(r.num());
|
||||
bg.fg(.default);
|
||||
addstr(r.unit);
|
||||
}
|
||||
|
||||
// Print a full decimal number with thousand separators.
|
||||
// Max: 18,446,744,073,709,551,615 -> 26 columns
|
||||
// (Assuming thousands_sep takes a single column)
|
||||
pub fn addnum(bg: Bg, v: u64) void {
|
||||
var buf: [32]u8 = undefined;
|
||||
const s = std.fmt.bufPrint(&buf, "{d}", .{v}) catch unreachable;
|
||||
var f: [64:0]u8 = undefined;
|
||||
var i: usize = 0;
|
||||
for (s, 0..) |digit, n| {
|
||||
if (n != 0 and (s.len - n) % 3 == 0) {
|
||||
for (main.config.thousands_sep) |ch| {
|
||||
f[i] = ch;
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
f[i] = digit;
|
||||
i += 1;
|
||||
}
|
||||
f[i] = 0;
|
||||
bg.fg(.num);
|
||||
addstr(&f);
|
||||
bg.fg(.default);
|
||||
}
|
||||
|
||||
// Print a file mode, takes 10 columns
|
||||
pub fn addmode(mode: u32) void {
|
||||
addch(switch (mode & std.posix.S.IFMT) {
|
||||
std.posix.S.IFDIR => 'd',
|
||||
std.posix.S.IFREG => '-',
|
||||
std.posix.S.IFLNK => 'l',
|
||||
std.posix.S.IFIFO => 'p',
|
||||
std.posix.S.IFSOCK => 's',
|
||||
std.posix.S.IFCHR => 'c',
|
||||
std.posix.S.IFBLK => 'b',
|
||||
else => '?'
|
||||
});
|
||||
addch(if (mode & 0o400 > 0) 'r' else '-');
|
||||
addch(if (mode & 0o200 > 0) 'w' else '-');
|
||||
addch(if (mode & 0o4000 > 0) 's' else if (mode & 0o100 > 0) @as(u7, 'x') else '-');
|
||||
addch(if (mode & 0o040 > 0) 'r' else '-');
|
||||
addch(if (mode & 0o020 > 0) 'w' else '-');
|
||||
addch(if (mode & 0o2000 > 0) 's' else if (mode & 0o010 > 0) @as(u7, 'x') else '-');
|
||||
addch(if (mode & 0o004 > 0) 'r' else '-');
|
||||
addch(if (mode & 0o002 > 0) 'w' else '-');
|
||||
addch(if (mode & 0o1000 > 0) (if (std.posix.S.ISDIR(mode)) @as(u7, 't') else 'T') else if (mode & 0o001 > 0) @as(u7, 'x') else '-');
|
||||
}
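// Illustrative examples of the mapping above (not part of the patch):
// addmode(0o040755) prints "drwxr-xr-x", addmode(0o100644) prints "-rw-r--r--",
// and a setuid binary such as 0o104755 prints "-rwsr-xr-x".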
|
||||
|
||||
// Print a timestamp, takes 25 columns
|
||||
pub fn addts(bg: Bg, ts: u64) void {
|
||||
const t = util.castClamp(c.time_t, ts);
|
||||
var buf: [32:0]u8 = undefined;
|
||||
const len = c.strftime(&buf, buf.len, "%Y-%m-%d %H:%M:%S %z", c.localtime(&t));
|
||||
if (len > 0) {
|
||||
bg.fg(.num);
|
||||
ui.addstr(buf[0..len:0]);
|
||||
} else {
|
||||
bg.fg(.default);
|
||||
ui.addstr(" invalid mtime");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hline(ch: c.chtype, len: u32) void {
|
||||
_ = c.hline(ch, @as(i32, @intCast(len)));
|
||||
}
|
||||
|
||||
// Draws a bordered box in the center of the screen.
|
||||
pub const Box = struct {
|
||||
start_row: u32,
|
||||
start_col: u32,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn create(height: u32, width: u32, title: [:0]const u8) Self {
|
||||
const s = Self{
|
||||
.start_row = (rows>>1) -| (height>>1),
|
||||
.start_col = (cols>>1) -| (width>>1),
|
||||
};
|
||||
style(.default);
|
||||
if (width < 6 or height < 3) return s;
|
||||
|
||||
const acs_map = @extern(*[128]c.chtype, .{ .name = "acs_map" });
|
||||
const ulcorner = acs_map['l'];
|
||||
const llcorner = acs_map['m'];
|
||||
const urcorner = acs_map['k'];
|
||||
const lrcorner = acs_map['j'];
|
||||
const acs_hline = acs_map['q'];
|
||||
const acs_vline = acs_map['x'];
|
||||
|
||||
var i: u32 = 0;
|
||||
while (i < height) : (i += 1) {
|
||||
s.move(i, 0);
|
||||
addch(if (i == 0) ulcorner else if (i == height-1) llcorner else acs_vline);
|
||||
hline(if (i == 0 or i == height-1) acs_hline else ' ', width-2);
|
||||
s.move(i, width-1);
|
||||
addch(if (i == 0) urcorner else if (i == height-1) lrcorner else acs_vline);
|
||||
}
|
||||
|
||||
s.move(0, 3);
|
||||
style(.box_title);
|
||||
addch(' ');
|
||||
addstr(title);
|
||||
addch(' ');
|
||||
style(.default);
|
||||
return s;
|
||||
}
|
||||
|
||||
pub fn tab(s: Self, col: u32, sel: bool, num: u3, label: [:0]const u8) void {
|
||||
const bg: Bg = if (sel) .hd else .default;
|
||||
s.move(0, col);
|
||||
bg.fg(.key);
|
||||
addch('0' + @as(u8, num));
|
||||
bg.fg(.default);
|
||||
addch(':');
|
||||
addstr(label);
|
||||
style(.default);
|
||||
}
|
||||
|
||||
// Move the global cursor to the given coordinates inside the box.
|
||||
pub fn move(s: Self, row: u32, col: u32) void {
|
||||
ui.move(s.start_row + row, s.start_col + col);
|
||||
}
|
||||
};
|
||||
|
||||
// Returns 0 if no key was pressed in non-blocking mode.
|
||||
// Returns -1 if it was KEY_RESIZE, requiring a redraw of the screen.
|
||||
pub fn getch(block: bool) i32 {
|
||||
_ = c.nodelay(c.stdscr, !block);
|
||||
// getch() has a bad tendency to not set a sensible errno when it returns ERR.
|
||||
// In non-blocking mode, we can only assume that ERR means "no input yet".
|
||||
// In blocking mode, give it 100 tries with a 10ms delay in between,
|
||||
// then just give up and die to avoid an infinite loop and unresponsive program.
|
||||
for (0..100) |_| {
|
||||
const ch = c.getch();
|
||||
if (ch == c.KEY_RESIZE) {
|
||||
updateSize();
|
||||
return -1;
|
||||
}
|
||||
if (ch == c.ERR) {
|
||||
if (!block) return 0;
|
||||
sleep(10*std.time.ns_per_ms);
|
||||
continue;
|
||||
}
|
||||
return ch;
|
||||
}
|
||||
die("Error reading keyboard input, assuming TTY has been lost.\n(Potentially nonsensical error message: {s})\n",
|
||||
.{ c.strerror(@intFromEnum(std.posix.errno(-1))) });
|
||||
}
|
||||
|
||||
fn waitInput() void {
|
||||
if (@hasDecl(std.io, "getStdIn")) {
|
||||
std.io.getStdIn().reader().skipUntilDelimiterOrEof('\n') catch unreachable;
|
||||
} else {
|
||||
var buf: [512]u8 = undefined;
|
||||
var rd = std.fs.File.stdin().reader(&buf);
|
||||
_ = rd.interface.discardDelimiterExclusive('\n') catch unreachable;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn runCmd(cmd: []const []const u8, cwd: ?[]const u8, env: *std.process.EnvMap, reporterr: bool) void {
|
||||
deinit();
|
||||
defer init();
|
||||
|
||||
// NCDU_LEVEL only counts up to 9; that keeps the implementation simple.
|
||||
if (env.get("NCDU_LEVEL")) |l|
|
||||
env.put("NCDU_LEVEL", if (l.len == 0) "1" else switch (l[0]) {
|
||||
'0'...'8' => |d| &[1] u8{d+1},
|
||||
'9' => "9",
|
||||
else => "1"
|
||||
}) catch unreachable
|
||||
else
|
||||
env.put("NCDU_LEVEL", "1") catch unreachable;
|
||||
|
||||
var child = std.process.Child.init(cmd, main.allocator);
|
||||
child.cwd = cwd;
|
||||
child.env_map = env;
|
||||
|
||||
const term = child.spawnAndWait() catch |e| blk: {
|
||||
std.debug.print("Error running command: {s}\n\nPress enter to continue.\n", .{ ui.errorString(e) });
|
||||
waitInput();
|
||||
break :blk std.process.Child.Term{ .Exited = 0 };
|
||||
};
|
||||
|
||||
const n = switch (term) {
|
||||
.Exited => "error",
|
||||
.Signal => "signal",
|
||||
.Stopped => "stopped",
|
||||
.Unknown => "unknown",
|
||||
};
|
||||
const v = switch (term) { inline else => |v| v };
|
||||
if (term != .Exited or (reporterr and v != 0)) {
|
||||
std.debug.print("\nCommand returned with {s} code {}.\nPress enter to continue.\n", .{ n, v });
|
||||
waitInput();
|
||||
}
|
||||
}
|
||||
src/util.c (deleted, 166 lines)
@@ -1,166 +0,0 @@
/* ncdu - NCurses Disk Usage
|
||||
|
||||
Copyright (c) 2007 Yoran Heling
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
#include "ncdu.h"
|
||||
|
||||
char cropsizedat[8];
|
||||
char cropdirdat[4096];
|
||||
|
||||
char *cropdir(const char *from, int s) {
|
||||
int i, j, o = strlen(from);
|
||||
if(o < s) {
|
||||
strcpy(cropdirdat, from);
|
||||
return(cropdirdat);
|
||||
}
|
||||
j=s/2-3;
|
||||
for(i=0; i<j; i++)
|
||||
cropdirdat[i] = from[i];
|
||||
cropdirdat[i] = '.';
|
||||
cropdirdat[++i] = '.';
|
||||
cropdirdat[++i] = '.';
|
||||
j=o-s;
|
||||
while(++i<s)
|
||||
cropdirdat[i] = from[j+i];
|
||||
cropdirdat[s] = '\0';
|
||||
return(cropdirdat);
|
||||
}
|
||||
|
||||
/* return value is always xxx.xXB = 8 bytes (including \0) */
|
||||
char *cropsize(const off_t from) {
|
||||
float r = from;
|
||||
char c = ' ';
|
||||
if(sflags & SF_SI) {
|
||||
if(r < 1000.0f) { }
|
||||
else if(r < 1000e3f) { c = 'k'; r/=1000.0f; }
|
||||
else if(r < 1000e6f) { c = 'M'; r/=1000e3f; }
|
||||
else if(r < 1000e9f) { c = 'G'; r/=1000e6f; }
|
||||
else { c = 'T'; r/=1000e9f; }
|
||||
} else {
|
||||
if(r < 1000.0f) { }
|
||||
else if(r < 1023e3f) { c = 'k'; r/=1024.0f; }
|
||||
else if(r < 1023e6f) { c = 'M'; r/=1048576.0f; }
|
||||
else if(r < 1023e9f) { c = 'G'; r/=1073741824.0f; }
|
||||
else { c = 'T'; r/=1099511627776.0f; }
|
||||
}
|
||||
sprintf(cropsizedat, "%5.1f%cB", r, c);
|
||||
return(cropsizedat);
|
||||
}
|
||||
|
||||
void ncresize(void) {
|
||||
int ch;
|
||||
getmaxyx(stdscr, winrows, wincols);
|
||||
while(!(sflags & SF_IGNS) && (winrows < 17 || wincols < 60)) {
|
||||
erase();
|
||||
mvaddstr(0, 0, "Warning: terminal too small,");
|
||||
mvaddstr(1, 1, "please either resize your terminal,");
|
||||
mvaddstr(2, 1, "press i to ignore, or press q to quit.");
|
||||
touchwin(stdscr);
|
||||
refresh();
|
||||
nodelay(stdscr, 0);
|
||||
ch = getch();
|
||||
getmaxyx(stdscr, winrows, wincols);
|
||||
if(ch == 'q') {
|
||||
erase();
|
||||
refresh();
|
||||
endwin();
|
||||
exit(0);
|
||||
}
|
||||
if(ch == 'i')
|
||||
sflags |= SF_IGNS;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void freedir_rec(struct dir *dr) {
|
||||
struct dir *tmp, *tmp2;
|
||||
tmp2 = dr;
|
||||
while(tmp2->prev != NULL)
|
||||
tmp2 = tmp2->prev;
|
||||
while((tmp = tmp2) != NULL) {
|
||||
if(tmp->sub) freedir_rec(tmp->sub);
|
||||
free(tmp->name);
|
||||
tmp2 = tmp->next;
|
||||
free(tmp);
|
||||
}
|
||||
}
|
||||
|
||||
/* remove a file/directory from the in-memory map */
|
||||
struct dir *freedir(struct dir *dr) {
|
||||
struct dir *tmp, *cur;
|
||||
|
||||
/* update sizes of parent directories */
|
||||
tmp = dr;
|
||||
if(dr->flags & FF_FILE) dr->files++;
|
||||
if(dr->flags & FF_DIR) dr->dirs++;
|
||||
while((tmp = tmp->parent) != NULL) {
|
||||
tmp->size -= dr->size;
|
||||
tmp->files -= dr->files;
|
||||
tmp->dirs -= dr->dirs;
|
||||
}
|
||||
|
||||
/* free dr->sub recursive */
|
||||
if(dr->sub) freedir_rec(dr->sub);
|
||||
|
||||
/* update references */
|
||||
cur = NULL;
|
||||
if(dr->next != NULL) { dr->next->prev = dr->prev; cur = dr->next; }
|
||||
if(dr->prev != NULL) { dr->prev->next = dr->next; cur = dr->prev; }
|
||||
if(cur != NULL)
|
||||
cur->flags |= FF_BSEL;
|
||||
|
||||
if(dr->parent->sub == dr) {
|
||||
if(dr->prev != NULL)
|
||||
dr->parent->sub = dr->prev;
|
||||
else if(dr->next != NULL)
|
||||
dr->parent->sub = dr->next;
|
||||
else {
|
||||
dr->parent->sub = NULL;
|
||||
cur = dr->parent;
|
||||
}
|
||||
}
|
||||
|
||||
free(dr->name);
|
||||
free(dr);
|
||||
|
||||
return(cur);
|
||||
}
|
||||
|
||||
char *getpath(struct dir *cur, char *to) {
|
||||
struct dir *d;
|
||||
d = cur;
|
||||
while(d->parent != NULL) {
|
||||
d->parent->sub = d;
|
||||
d = d->parent;
|
||||
}
|
||||
to[0] = '\0';
|
||||
while(d->parent != cur->parent) {
|
||||
if(d->parent != NULL && d->parent->name[strlen(d->parent->name)-1] != '/')
|
||||
strcat(to, "/");
|
||||
strcat(to, d->name);
|
||||
d = d->sub;
|
||||
}
|
||||
return to;
|
||||
}
|
||||
src/util.zig (new file, 249 lines)
@@ -0,0 +1,249 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
// Cast any integer type to the target type, clamping the value to the supported maximum if necessary.
|
||||
pub fn castClamp(comptime T: type, x: anytype) T {
|
||||
// (adapted from std.math.cast)
|
||||
if (std.math.maxInt(@TypeOf(x)) > std.math.maxInt(T) and x > std.math.maxInt(T)) {
|
||||
return std.math.maxInt(T);
|
||||
} else if (std.math.minInt(@TypeOf(x)) < std.math.minInt(T) and x < std.math.minInt(T)) {
|
||||
return std.math.minInt(T);
|
||||
} else {
|
||||
return @intCast(x);
|
||||
}
|
||||
}
|
||||
|
||||
// Cast any integer type to the target type, truncating if necessary.
|
||||
pub fn castTruncate(comptime T: type, x: anytype) T {
|
||||
const Ti = @typeInfo(T).int;
|
||||
const Xi = @typeInfo(@TypeOf(x)).int;
|
||||
const nx: std.meta.Int(Ti.signedness, Xi.bits) = @bitCast(x);
|
||||
return if (Xi.bits > Ti.bits) @truncate(nx) else nx;
|
||||
}
|
||||
|
||||
// Multiplies by 512, saturating.
|
||||
pub fn blocksToSize(b: u64) u64 {
|
||||
return b *| 512;
|
||||
}
|
||||
|
||||
// Ensure the given arraylist buffer gets zero-terminated and returns a slice
|
||||
// into the buffer. The returned buffer is invalidated whenever the arraylist
|
||||
// is freed or written to.
|
||||
pub fn arrayListBufZ(buf: *std.ArrayListUnmanaged(u8), alloc: std.mem.Allocator) [:0]const u8 {
|
||||
buf.append(alloc, 0) catch unreachable;
|
||||
defer buf.items.len -= 1;
|
||||
return buf.items[0..buf.items.len-1:0];
|
||||
}
|
||||
|
||||
// Format an integer as right-aligned '###.#'.
|
||||
// Pretty much equivalent to:
|
||||
// std.fmt.bufPrintZ(.., "{d:>5.1}", @floatFromInt(n)/10.0);
|
||||
// Except this function doesn't pull in large float formatting tables.
|
||||
pub fn fmt5dec(n: u14) [5:0]u8 {
|
||||
std.debug.assert(n <= 9999);
|
||||
var buf: [5:0]u8 = " 0.0".*;
|
||||
var v = n;
|
||||
buf[4] += @intCast(v % 10);
|
||||
v /= 10;
|
||||
buf[2] += @intCast(v % 10);
|
||||
v /= 10;
|
||||
if (v == 0) return buf;
|
||||
buf[1] = '0' + @as(u8, @intCast(v % 10));
|
||||
v /= 10;
|
||||
if (v == 0) return buf;
|
||||
buf[0] = '0' + @as(u8, @intCast(v));
|
||||
return buf;
|
||||
}
|
||||
|
||||
test "fmt5dec" {
|
||||
const eq = std.testing.expectEqualStrings;
|
||||
try eq(" 0.0", &fmt5dec(0));
|
||||
try eq(" 0.5", &fmt5dec(5));
|
||||
try eq(" 9.5", &fmt5dec(95));
|
||||
try eq(" 12.5", &fmt5dec(125));
|
||||
try eq("123.9", &fmt5dec(1239));
|
||||
try eq("999.9", &fmt5dec(9999));
|
||||
}
|
||||
|
||||
|
||||
// Straightforward Zig port of strnatcmp() from https://github.com/sourcefrog/natsort/
|
||||
// (Requiring nul-terminated strings is ugly, but we've got them anyway and it does simplify the code)
|
||||
pub fn strnatcmp(a: [:0]const u8, b: [:0]const u8) std.math.Order {
|
||||
var ai: usize = 0;
|
||||
var bi: usize = 0;
|
||||
const isDigit = std.ascii.isDigit;
|
||||
while (true) {
|
||||
while (std.ascii.isWhitespace(a[ai])) ai += 1;
|
||||
while (std.ascii.isWhitespace(b[bi])) bi += 1;
|
||||
|
||||
if (isDigit(a[ai]) and isDigit(b[bi])) {
|
||||
if (a[ai] == '0' or b[bi] == '0') { // compare_left
|
||||
while (true) {
|
||||
if (!isDigit(a[ai]) and !isDigit(b[bi])) break;
|
||||
if (!isDigit(a[ai])) return .lt;
|
||||
if (!isDigit(b[bi])) return .gt;
|
||||
if (a[ai] < b[bi]) return .lt;
|
||||
if (a[ai] > b[bi]) return .gt;
|
||||
ai += 1;
|
||||
bi += 1;
|
||||
}
|
||||
} else { // compare_right - for right-aligned numbers
|
||||
var bias = std.math.Order.eq;
|
||||
while (true) {
|
||||
if (!isDigit(a[ai]) and !isDigit(b[bi])) {
|
||||
if (bias != .eq or (a[ai] == 0 and b[bi] == 0)) return bias
|
||||
else break;
|
||||
}
|
||||
if (!isDigit(a[ai])) return .lt;
|
||||
if (!isDigit(b[bi])) return .gt;
|
||||
if (bias == .eq) {
|
||||
if (a[ai] < b[bi]) bias = .lt;
|
||||
if (a[ai] > b[bi]) bias = .gt;
|
||||
}
|
||||
ai += 1;
|
||||
bi += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (a[ai] == 0 and b[bi] == 0) return .eq;
|
||||
if (a[ai] < b[bi]) return .lt;
|
||||
if (a[ai] > b[bi]) return .gt;
|
||||
ai += 1;
|
||||
bi += 1;
|
||||
}
|
||||
}
|
||||
|
||||
test "strnatcmp" {
|
||||
// Test strings from https://github.com/sourcefrog/natsort/
|
||||
// Includes sorted-words, sorted-dates and sorted-fractions.
|
||||
const w = [_][:0]const u8{
|
||||
"1-02",
|
||||
"1-2",
|
||||
"1-20",
|
||||
"1.002.01",
|
||||
"1.002.03",
|
||||
"1.002.08",
|
||||
"1.009.02",
|
||||
"1.009.10",
|
||||
"1.009.20",
|
||||
"1.010.12",
|
||||
"1.011.02",
|
||||
"10-20",
|
||||
"1999-3-3",
|
||||
"1999-12-25",
|
||||
"2000-1-2",
|
||||
"2000-1-10",
|
||||
"2000-3-23",
|
||||
"fred",
|
||||
"jane",
|
||||
"pic01",
|
||||
"pic02",
|
||||
"pic02a",
|
||||
"pic02000",
|
||||
"pic05",
|
||||
"pic2",
|
||||
"pic3",
|
||||
"pic4",
|
||||
"pic 4 else",
|
||||
"pic 5",
|
||||
"pic 5 ",
|
||||
"pic 5 something",
|
||||
"pic 6",
|
||||
"pic 7",
|
||||
"pic100",
|
||||
"pic100a",
|
||||
"pic120",
|
||||
"pic121",
|
||||
"tom",
|
||||
"x2-g8",
|
||||
"x2-y08",
|
||||
"x2-y7",
|
||||
"x8-y8",
|
||||
};
|
||||
// Test each string against each other string, simple and thorough.
|
||||
const eq = std.testing.expectEqual;
|
||||
for (0..w.len) |i| {
|
||||
try eq(strnatcmp(w[i], w[i]), .eq);
|
||||
for (0..i) |j| try eq(strnatcmp(w[i], w[j]), .gt);
|
||||
for (i+1..w.len) |j| try eq(strnatcmp(w[i], w[j]), .lt);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub fn expanduser(path: []const u8, alloc: std.mem.Allocator) ![:0]u8 {
|
||||
if (path.len == 0 or path[0] != '~') return alloc.dupeZ(u8, path);
|
||||
|
||||
const len = std.mem.indexOfScalar(u8, path, '/') orelse path.len;
|
||||
const home_raw = blk: {
|
||||
const pwd = pwd: {
|
||||
if (len == 1) {
|
||||
if (std.posix.getenvZ("HOME")) |p| break :blk p;
|
||||
break :pwd c.getpwuid(c.getuid());
|
||||
} else {
|
||||
const name = try alloc.dupeZ(u8, path[1..len]);
|
||||
defer alloc.free(name);
|
||||
break :pwd c.getpwnam(name.ptr);
|
||||
}
|
||||
};
|
||||
if (pwd != null)
|
||||
if (@as(*c.struct_passwd, pwd).pw_dir) |p|
|
||||
break :blk std.mem.span(p);
|
||||
return alloc.dupeZ(u8, path);
|
||||
};
|
||||
const home = std.mem.trimRight(u8, home_raw, "/");
|
||||
|
||||
if (home.len == 0 and path.len == len) return alloc.dupeZ(u8, "/");
|
||||
return try std.mem.concatWithSentinel(alloc, u8, &.{ home, path[len..] }, 0);
|
||||
}
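// A minimal usage sketch (illustrative, not part of the patch), expanding a
// user-supplied "~" path before opening it; callers in ncdu would pass
// main.allocator as the allocator:
//
//     const p = try expanduser("~/some/dir", alloc);
//     defer alloc.free(p);
//     var dir = try std.fs.cwd().openDir(p, .{});
//     defer dir.close();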
|
||||
|
||||
|
||||
// Silly abstraction to read a file one line at a time. Only exists to help
|
||||
// with supporting both Zig 0.14 and 0.15, can be removed once 0.14 support is
|
||||
// dropped.
|
||||
pub const LineReader = if (@hasDecl(std.io, "bufferedReader")) struct {
|
||||
rd: std.io.BufferedReader(4096, std.fs.File.Reader),
|
||||
fbs: std.io.FixedBufferStream([]u8),
|
||||
|
||||
pub fn init(f: std.fs.File, buf: []u8) @This() {
|
||||
return .{
|
||||
.rd = std.io.bufferedReader(f.reader()),
|
||||
.fbs = std.io.fixedBufferStream(buf),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn read(s: *@This()) !?[]u8 {
|
||||
s.fbs.reset();
|
||||
s.rd.reader().streamUntilDelimiter(s.fbs.writer(), '\n', s.fbs.buffer.len) catch |err| switch (err) {
|
||||
error.EndOfStream => if (s.fbs.getPos() catch unreachable == 0) return null,
|
||||
else => |e| return e,
|
||||
};
|
||||
return s.fbs.getWritten();
|
||||
}
|
||||
|
||||
} else struct {
|
||||
rd: std.fs.File.Reader,
|
||||
|
||||
pub fn init(f: std.fs.File, buf: []u8) @This() {
|
||||
return .{ .rd = f.readerStreaming(buf) };
|
||||
}
|
||||
|
||||
pub fn read(s: *@This()) !?[]u8 {
|
||||
// Can't use takeDelimiter() because that's not available in 0.15.1,
|
||||
// Can't use takeDelimiterExclusive() because that changed behavior in 0.15.2.
|
||||
const r = &s.rd.interface;
|
||||
const result = r.peekDelimiterInclusive('\n') catch |err| switch (err) {
|
||||
error.EndOfStream => {
|
||||
const remaining = r.buffer[r.seek..r.end];
|
||||
if (remaining.len == 0) return null;
|
||||
r.toss(remaining.len);
|
||||
return remaining;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
r.toss(result.len);
|
||||
return result[0 .. result.len - 1];
|
||||
}
|
||||
};
|
||||