Mirror of https://code.blicky.net/yorhel/ncdu.git
Synced 2026-01-14 09:48:40 -09:00

Compare commits (107 commits)
Commits:

1b3d0a670e, f452244576, 14bb8d0dd1, 19cfdcf543, 5129de737e, 68671a1af1,
74c91768a0, ac4d689e22, 66b875eb00, 67f34090fb, 5b96a48f53, 58e6458130,
653c3bfe70, beac59fb12, d97a7f73dd, 35a9faadb2, e43d22ba3f, f4e4694612,
c9f3d39d3e, 2b4c1ca03e, af7163acf6, 5438312440, 0918096301, ee1d80da6a,
93a81a3898, cf3a8f3043, f7fe61194b, 456cde16df, 3c77dc458a, ce9921846c,
e0ab5d40c7, 607b07a30e, b4dc9f1d4d, 2e5c767d4c, 5d5182ede3, db96bc698c,
4873a7c765, 49d43f89a1, e5a6a1c5ea, 5593fa2233, 9d51df02c1, 7ed209a8e5,
4bd6e3daba, 2fcd7f370c, 232a4f8741, bdc730f1e5, df5845baad, 0e6967498f,
bd442673d2, 28d9eaecab, 61d7fc8473, e142d012f0, 39517c01a8, cc26ead5f8,
ca46c7241f, e324804cdd, 26229d7a63, 4ef9c3e817, c30699f93b, 6b7983b2f5,
9418079da3, 18f322c532, 252f7fc253, 49ef7cc34e, 17e384b485, ad166de925,
22dca22450, 30d6ddf149, 8fb2290d5e, 90b43755b8, 8ad61e87c1, 85e12beb1c,
025e5ee99e, cd00ae50d1, 5a0c8c6175, ebaa9b6a89, f25bc5cbf4, 87d336baeb,
0a6bcee32b, 3c055810d0, f6bffa40c7, 08d373881c, dc42c91619, 2b2b4473e5,
9cbe1bc91f, f28f69d831, a5e57ee5ad, b0d4fbe94f, 99f92934c6, 9b517f27b1,
705bd8907d, e5508ba9b4, 6bb31a4653, 7558fd7f8e, 1e56c8604e, d2e8dd8a90,
ddbed8b07f, db51987446, cc12c90dbc, f2541d42ba, c41467f240, 2f97601736,
574a4348a3, 0215f3569d, f4f4af4ee5, 6db150cc98, a4484f27f3
22 changed files with 4225 additions and 1693 deletions

.gitignore (vendored): 2 changes

@@ -4,6 +4,8 @@
 *.swp
 *~
 ncurses
 zstd
 static-*/
 zig-cache/
+zig-out/
+.zig-cache/

ChangeLog: 65 changes

@@ -1,6 +1,71 @@
 # SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 # SPDX-License-Identifier: MIT
 
+2.9.2 - 2025-10-24
+- Still requires Zig 0.14 or 0.15
+- Fix hang on loading config file when compiled with Zig 0.15.2
+
+2.9.1 - 2025-08-21
+- Add support for building with Zig 0.15
+- Zig 0.14 is still supported
+
+2.9 - 2025-08-16
+- Still requires Zig 0.14
+- Add --delete-command option to replace the built-in file deletion
+- Move term cursor to selected option in delete confirmation window
+- Support binary import on older Linux kernels lacking statx() (may break
+  again in the future, Zig does not officially support such old kernels)
+
+2.8.2 - 2025-05-01
+- Still requires Zig 0.14
+- Fix a build error on MacOS
+
+2.8.1 - 2025-04-28
+- Still requires Zig 0.14
+- Fix integer overflow in binary export
+- Fix crash when `fstatat()` returns EINVAL
+- Minor build system improvements
+
+2.8 - 2025-03-05
+- Now requires Zig 0.14
+- Add support for @-prefixed lines to ignore errors in config file
+- List all supported options in `--help`
+- Use `kB` instead of `KB` in `--si` mode
+
+2.7 - 2024-11-19
+- Still requires Zig 0.12 or 0.13
+- Support transparent reading/writing of zstandard-compressed JSON
+- Add `--compress` and `--export-block-size` options
+- Perform tilde expansion on paths in the config file
+- Fix JSON import of escaped UTF-16 surrogate pairs
+- Fix incorrect field in root item when exporting to the binary format
+- Add -Dstrip build flag
+
+2.6 - 2024-09-27
+- Still requires Zig 0.12 or 0.13
+- Add dependency on libzstd
+- Add new export format to support threaded export and low-memory browsing
+- Add `-O` and `--compress-level` CLI flags
+- Add progress indicator to hardlink counting stage
+- Fix displaying and exporting zero values when extended info is not available
+- Fix clearing screen in some error cases
+- Fix uncommon edge case in hardlink counting on refresh
+- Use integer math instead of floating point to format numbers
+
+2.5 - 2024-07-24
+- Still requires Zig 0.12 or 0.13
+- Add parallel scanning with `-t,--threads` CLI flags
+- Improve JSON export and import performance
+- `--exclude-kernfs` is no longer checked on the top-level scan path
+- Fix entries sometimes not showing up after refresh
+- Fix file descriptor leak with `--exclude-caches` checking
+- Fix possible crash on invalid UTF8 when scanning in `-1` UI mode
+- Fix JSON export and import of the "other filesystem" flag
+- Fix JSON import containing directories with a read error
+- Fix mtime display of 'special' files
+- Fix edge case bad performance when deleting hardlinks with many links
+- Increased memory use for hardlinks (by ~10% in extreme cases, sorry)
+
 2.4 - 2024-04-21
 - Now requires Zig 0.12
 - Revert default color scheme back to 'off'

Makefile: 26 changes

@@ -9,7 +9,7 @@ ZIG ?= zig
 PREFIX ?= /usr/local
 BINDIR ?= ${PREFIX}/bin
 MANDIR ?= ${PREFIX}/share/man/man1
-ZIG_FLAGS ?= --release
+ZIG_FLAGS ?= --release=fast -Dstrip
 
 NCDU_VERSION=$(shell grep 'program_version = "' src/main.zig | sed -e 's/^.*"\(.\+\)".*$$/\1/')
 
@@ -52,27 +52,41 @@ dist:
 	rm -rf ncdu-${NCDU_VERSION}
 
 
-# ASSUMPTION: the ncurses source tree has been extracted into ncurses/
+# ASSUMPTION:
+# - the ncurses source tree has been extracted into ncurses/
+# - the zstd source tree has been extracted into zstd/
 # Would be nicer to do all this with the Zig build system, but no way am I
 # going to write build.zig's for these projects.
 static-%.tar.gz:
 	mkdir -p static-$*/nc static-$*/inst/pkg
+	cp -R zstd/lib static-$*/zstd
+	make -C static-$*/zstd -j8 libzstd.a V=1\
+		ZSTD_LIB_DICTBUILDER=0\
+		ZSTD_LIB_MINIFY=1\
+		ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP=1\
+		CC="${ZIG} cc --target=$*"\
+		LD="${ZIG} cc --target=$*"\
+		AR="${ZIG} ar" RANLIB="${ZIG} ranlib"
 	cd static-$*/nc && ../../ncurses/configure --prefix="`pwd`/../inst"\
 		--with-pkg-config-libdir="`pwd`/../inst/pkg"\
 		--without-cxx --without-cxx-binding --without-ada --without-manpages --without-progs\
-		--without-tests --enable-pc-files --without-pkg-config --without-shared --without-debug\
+		--without-tests --disable-pc-files --without-pkg-config --without-shared --without-debug\
 		--without-gpm --without-sysmouse --enable-widec --with-default-terminfo-dir=/usr/share/terminfo\
 		--with-terminfo-dirs=/usr/share/terminfo:/lib/terminfo:/usr/local/share/terminfo\
 		--with-fallbacks="screen linux vt100 xterm xterm-256color" --host=$*\
 		CC="${ZIG} cc --target=$*"\
 		LD="${ZIG} cc --target=$*"\
 		AR="${ZIG} ar" RANLIB="${ZIG} ranlib"\
-		CPPFLAGS=-D_GNU_SOURCE && make -j8 && make install.libs
+		CPPFLAGS=-D_GNU_SOURCE && make -j8
 	@# zig-build - cleaner approach but doesn't work, results in a dynamically linked binary.
 	@#cd static-$* && PKG_CONFIG_LIBDIR="`pwd`/inst/pkg" zig build -Dtarget=$*
 	@# --build-file ../build.zig --search-prefix inst/ --cache-dir zig -Drelease-fast=true
 	@# Alternative approach, bypassing zig-build
 	cd static-$* && ${ZIG} build-exe -target $*\
-		-Iinst/include -Iinst/include/ncursesw -lc inst/lib/libncursesw.a\
+		-Inc/include -Izstd -lc nc/lib/libncursesw.a zstd/libzstd.a\
		--cache-dir zig-cache -static -fstrip -O ReleaseFast ../src/main.zig
 	@# My system's strip can't deal with arm binaries and zig doesn't wrap a strip alternative.
 	@# Whatever, just let it error for those.
 	strip -R .eh_frame -R .eh_frame_hdr static-$*/main || true
 	cd static-$* && mv main ncdu && tar -czf ../static-$*.tar.gz ncdu
 	rm -rf static-$*

README.md

@@ -19,9 +19,10 @@ C version (1.x).
 
 ## Requirements
 
-- Zig 0.12.0
+- Zig 0.14 or 0.15
 - Some sort of POSIX-like OS
-- ncurses libraries and header files
+- ncurses
+- libzstd
 
 ## Install
 

build.zig: 28 changes

@@ -5,24 +5,28 @@ const std = @import("std");
 
 pub fn build(b: *std.Build) void {
     const target = b.standardTargetOptions(.{});
-    const optimize = b.standardOptimizeOption(.{
-        .preferred_optimize_mode = .ReleaseFast,
-    });
+    const optimize = b.standardOptimizeOption(.{});
 
-    const pie = b.option(bool, "pie", "Build with PIE support (by default false)") orelse false;
+    const pie = b.option(bool, "pie", "Build with PIE support (by default: target-dependant)");
     const strip = b.option(bool, "strip", "Strip debugging info (by default false)") orelse false;
 
-    const exe = b.addExecutable(.{
-        .name = "ncdu",
+    const main_mod = b.createModule(.{
         .root_source_file = b.path("src/main.zig"),
         .target = target,
         .optimize = optimize,
         .strip = strip,
         .link_libc = true,
     });
+    main_mod.linkSystemLibrary("ncursesw", .{});
+    main_mod.linkSystemLibrary("zstd", .{});
+
+    const exe = b.addExecutable(.{
+        .name = "ncdu",
+        .root_module = main_mod,
+    });
     exe.pie = pie;
-    exe.root_module.linkSystemLibrary("ncursesw", .{});
-    // https://github.com/ziglang/zig/blob/b52be973dfb7d1408218b8e75800a2da3dc69108/build.zig#L551-L554
-    if (target.result.isDarwin()) {
+    // https://github.com/ziglang/zig/blob/faccd79ca5debbe22fe168193b8de54393257604/build.zig#L745-L748
+    if (target.result.os.tag.isDarwin()) {
         // useful for package maintainers
         exe.headerpad_max_install_names = true;
     }
 
@@ -38,13 +42,9 @@ pub fn build(b: *std.Build) void {
     run_step.dependOn(&run_cmd.step);
 
     const unit_tests = b.addTest(.{
-        .root_source_file = b.path("src/main.zig"),
-        .target = target,
-        .optimize = optimize,
-        .link_libc = true,
+        .root_module = main_mod,
     });
     unit_tests.pie = pie;
-    unit_tests.root_module.linkSystemLibrary("ncursesw", .{});
 
     const run_unit_tests = b.addRunArtifact(unit_tests);
 

ncdu.1: 148 changes

@@ -1,15 +1,17 @@
 .\" SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 .\" SPDX-License-Identifier: MIT
-.Dd April 20, 2024
+.Dd August 16, 2025
 .Dt NCDU 1
 .Os
 .Sh NAME
 .Nm ncdu
 .Nd NCurses Disk Usage
+.
 .Sh SYNOPSIS
 .Nm
 .Op Fl f Ar file
 .Op Fl o Ar file
+.Op Fl O Ar file
 .Op Fl e , \-extended , \-no\-extended
 .Op Fl \-ignore\-config
 .Op Fl x , \-one\-file\-system , \-cross\-file\-system
@@ -18,6 +20,10 @@
 .Op Fl \-include\-caches , \-exclude\-caches
 .Op Fl L , \-follow\-symlinks , \-no\-follow\-symlinks
 .Op Fl \-include\-kernfs , \-exclude\-kernfs
+.Op Fl t , \-threads Ar num
+.Op Fl c , \-compress , \-no\-compress
+.Op Fl \-compress\-level Ar num
+.Op Fl \-export\-block\-size Ar num
 .Op Fl 0 , 1 , 2
 .Op Fl q , \-slow\-ui\-updates , \-fast\-ui\-updates
 .Op Fl \-enable\-shell , \-disable\-shell
@@ -38,17 +44,20 @@
 .Op Fl \-group\-directories\-first , \-no\-group\-directories\-first
 .Op Fl \-confirm\-quit , \-no\-confirm\-quit
 .Op Fl \-confirm\-delete , \-no\-confirm\-delete
+.Op Fl \-delete\-command Ar command
 .Op Fl \-color Ar off | dark | dark-bg
 .Op Ar path
 .Nm
 .Op Fl h , \-help
 .Nm
 .Op Fl v , V , \-version
+.
 .Sh DESCRIPTION
 .Nm
 (NCurses Disk Usage) is an interactive curses-based version of the well-known
 .Xr du 1 ,
 and provides a fast way to see what directories are using your disk space.
+.
 .Sh OPTIONS
 .Ss Mode Selection
 .Bl -tag -width Ds
@@ -59,10 +68,13 @@ Print version and quit.
 .It Fl f Ar file
 Load the given file, which has earlier been created with the
 .Fl o
+or
+.Fl O
 flag.
 If
 .Ar file
 is equivalent to '\-', the file is read from standard input.
+Reading from standard input is only supported for the JSON format.
 .Pp
 For the sake of preventing a screw-up, the current version of
 .Nm
@@ -73,7 +85,7 @@ will be disabled.
 .It Ar dir
 Scan the given directory.
 .It Fl o Ar file
-Export all necessary information to
+Export the directory tree in JSON format to
 .Ar file
 instead of opening the browser interface.
 If
@@ -87,6 +99,27 @@ directory with many files.
 uncompressed, or a little over 100 KiB when compressed with gzip.
 This scales linearly, so be prepared to handle a few tens of megabytes when
 dealing with millions of files.
+.Pp
+Consider enabling
+.Fl c
+to output Zstandard-compressed JSON, which can significantly reduce the size of
+the exported data.
+.Pp
+When running a multi-threaded scan or when scanning a directory tree that may
+not fit in memory, consider using
+.Fl O
+instead.
+.It Fl O Ar file
+Export the directory tree in binary format to
+.Ar file
+instead of opening the browser interface.
+If
+.Ar file
+is '\-', the data is written to standard output.
+The binary format has built-in compression, supports low-memory multi-threaded
+export (in combination with
+.Fl t )
+and can be browsed without importing the entire directory tree into memory.
 .It Fl e , \-extended , \-no\-extended
 Enable/disable extended information mode.
 This will, in addition to the usual file information, also read the ownership,
@@ -105,6 +138,7 @@ using 'm' and 'M', respectively.
 .It Fl \-ignore\-config
 Do not attempt to load any configuration files.
 .El
+.
 .Ss Scan Options
 These options affect the scanning progress; they have no effect when importing
 directory information from a file.
@@ -147,7 +181,52 @@ The exact counting behavior of this flag is subject to change in the future.
 .Pp
 The complete list of currently known pseudo filesystems is: binfmt, bpf, cgroup,
 cgroup2, debug, devpts, proc, pstore, security, selinux, sys, trace.
+.It Fl t , \-threads Ar num
+Number of threads to use when scanning the filesystem, defaults to 1.
+.Pp
+In single-threaded mode, the JSON export (see
+.Fl o )
+can operate with very little memory, but in multi-threaded mode the entire
+directory tree is first constructed in memory and written out after the
+filesystem scan has completed.
+This causes a delay in output and requires significantly more memory for large
+directory trees.
+The binary format (see
+.Fl O )
+does not have this problem and supports efficient exporting with any number of
+threads.
 .El
+.
+.Ss Export Options
+These options affect behavior when exporting to file with the
+.Fl o
+or
+.Fl O
+options.
+.Bl -tag -width Ds
+.It Fl c , \-compress , \-no\-compress
+Enable or disable Zstandard compression when exporting to JSON (see
+.Fl o ) .
+.It Fl \-compress\-level Ar num
+Set the Zstandard compression level when using
+.Fl O
+or
+.Fl c .
+Valid values are 1 (fastest) to 19 (slowest).
+Defaults to 4.
+.It Fl \-export\-block\-size Ar num
+Set the block size, in kibibytes, for the binary export format (see
+.Fl O ) .
+Larger blocks require more memory but result in better compression efficiency.
+This option can be combined with a higher
+.Fl \-compress\-level
+for even better compression.
+.Pp
+Accepted values are between 4 and 16000.
+The default is to start at 64 KiB and then gradually increase the block size
+for large exports.
+.El
+.
 .Ss Interface Options
 .Bl -tag -width Ds
 .It Fl 0
@@ -159,9 +238,8 @@ When exporting the data with
 ncurses will not be initialized at all.
 This option is the default when exporting to standard output.
 .It Fl 1
-Similar to
-.Fl 0 ,
-but does give feedback on the scanning progress with a single line of output.
+Write progress information to the terminal, but don't open a full-screen
+ncurses interface.
 This option is the default when exporting to a file.
 .Pp
 In some cases, the ncurses browser interface which you'll see after the
@@ -209,7 +287,7 @@ when given twice it will also add
 thus ensuring that there is no way to modify the file system from within
 .Nm .
 .It Fl \-si , \-no\-si
-List sizes using base 10 prefixes, that is, powers of 1000 (KB, MB, etc), as
+List sizes using base 10 prefixes, that is, powers of 1000 (kB, MB, etc), as
 defined in the International System of Units (SI), instead of the usual base 2
 prefixes (KiB, MiB, etc).
 .It Fl \-disk\-usage , \-apparent\-size
@@ -282,6 +360,31 @@ Can be helpful when you accidentally press 'q' during or after a very long scan.
 Require a confirmation before deleting a file or directory.
 Enabled by default, but can be disabled if you're absolutely sure you won't
 accidentally press 'd'.
+.It Fl \-delete\-command Ar command
+When set to a non-empty string, replace the built-in file deletion feature with
+a custom shell command.
+.Pp
+The absolute path of the item to be deleted is appended to the given command
+and the result is evaluated in a shell.
+The command is run from the same directory that ncdu itself was started in.
+The
+.Ev NCDU_DELETE_PATH
+environment variable is set to the absolute path of the item to be deleted and
+.Ev NCDU_LEVEL
+is set in the same fashion as when spawning a shell from within ncdu.
+.Pp
+After command completion, the in-memory view of the selected item is refreshed
+and directory sizes are adjusted as necessary.
+This is not a full refresh of the complete directory tree, so if the item has
+been renamed or moved to another directory, its new location is not
+automatically picked up.
+.Pp
+For example, to use
+.Xr rm 1
+interactive mode to prompt before each deletion:
+.Dl ncdu --no-confirm-delete --delete-command \[aq]rm -ri --\[aq]
+Or to move files to trash:
+.Dl ncdu --delete-command \[aq]gio trash --\[aq]
 .It Fl \-color Ar off | dark | dark-bg
 Set the color scheme.
 The following schemes are recognized:
@@ -297,6 +400,7 @@ color scheme that also works in terminals with a light background.
 The default is
 .Ar off .
 .El
+.
 .Sh CONFIGURATION
 .Nm
 can be configured by placing command-line options in
@@ -314,6 +418,7 @@ is given on the command line.
 .Pp
 The configuration file format is simply one command line option per line.
 Lines starting with '#' are ignored.
+A line can be prefixed with '@' to suppress errors while parsing the option.
 Example configuration file:
 .Bd -literal -offset indent
 # Always enable extended mode
@@ -324,7 +429,11 @@ Example configuration file:
 
 # Exclude .git directories
 \-\-exclude .git
+
+# Read excludes from ~/.ncduexcludes, ignore error if the file does not exist
+@--exclude-from ~/.ncduexcludes
 .Ed
+.
 .Sh KEYS
 .Bl -tag -width Ds
 .It ?
@@ -411,6 +520,7 @@ itself does not (currently) warn about or prevent this situation.
 .It q
 Quit
 .El
+.
 .Sh FILE FLAGS
 Entries in the browser interface may be prefixed by a one\-character flag.
 These flags have the following meaning:
@@ -434,37 +544,32 @@ Same file was already counted (hard link).
 .It e
 Empty directory.
 .El
+.
 .Sh EXAMPLES
 To scan and browse the directory you're currently in, all you need is a simple:
 .Dl ncdu
-If you want to scan a full filesystem, for example your root filesystem, then
-you'll want to use
+To scan a full filesystem, for example your root filesystem, you'll want to use
 .Fl x :
 .Dl ncdu \-x /
 .Pp
 Since scanning a large directory may take a while, you can scan a directory and
 export the results for later viewing:
 .Bd -literal -offset indent
-ncdu \-1xo\- / | gzip >export.gz
+ncdu \-1xO export.ncdu /
 # ...some time later:
-zcat export.gz | ncdu \-f\-
+ncdu \-f export.ncdu
 .Ed
 To export from a cron job, make sure to replace
 .Fl 1
 with
 .Fl 0
-to suppress any unnecessary output.
+to suppress unnecessary progress output.
 .Pp
 You can also export a directory and browse it once scanning is done:
-.Dl ncdu \-o\- | tee export.file | ./ncdu \-f\-
-The same is possible with gzip compression, but is a bit kludgey:
-.Dl ncdu \-o\- | gzip | tee export.gz | gunzip | ./ncdu \-f\-
+.Dl ncdu \-co\- | tee export.json.zst | ./ncdu \-f\-
 .Pp
 To scan a system remotely, but browse through the files locally:
-.Dl ssh \-C user@system ncdu \-o\- / | ./ncdu \-f\-
-The
-.Fl C
-option to ssh enables compression, which will be very useful over slow links.
+.Dl ssh user@system ncdu \-co\- / | ./ncdu \-f\-
 Remote scanning and local viewing have two major advantages when
 compared to running
 .Nm
@@ -473,6 +578,7 @@ the local system without any network latency, and
 .Nm
 does not keep the entire directory structure in memory when exporting, so this
 won't consume much memory on the remote system.
+.
 .Sh SEE ALSO
 .Xr du 1 ,
 .Xr tree 1 .
@@ -480,13 +586,15 @@ won't consume much memory on the remote system.
 .Nm
 has a website:
 .Lk https://dev.yorhel.nl/ncdu
+.
 .Sh AUTHORS
 Written by
 .An Yorhel Aq Mt projects@yorhel.nl
+.
 .Sh BUGS
 Directory hard links and firmlinks (MacOS) are not supported.
-They are not detected as being hard links, and will thus get scanned and
-counted multiple times.
+They are not detected as being hard links and will thus get scanned and counted
+multiple times.
 .Pp
 Some minor glitches may appear when displaying filenames that contain multibyte
 or multicolumn characters.

src/bin_export.zig: new file (468 lines)

@@ -0,0 +1,468 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const sink = @import("sink.zig");
const util = @import("util.zig");
const ui = @import("ui.zig");
const c = @import("c.zig").c;

pub const global = struct {
    var fd: std.fs.File = undefined;
    var index: std.ArrayListUnmanaged(u8) = .empty;
    var file_off: u64 = 0;
    var lock: std.Thread.Mutex = .{};
    var root_itemref: u64 = 0;
};

pub const SIGNATURE = "\xbfncduEX1";

pub const ItemKey = enum(u5) {
    // all items
    type = 0, // EType
    name = 1, // bytes
    prev = 2, // itemref
    // Only for non-specials
    asize = 3, // u64
    dsize = 4, // u64
    // Only for .dir
    dev = 5, // u64, only if different from parent dir
    rderr = 6, // bool; true = error reading directory list, false = error in sub-item, absent = no error
    cumasize = 7, // u64
    cumdsize = 8, // u64
    shrasize = 9, // u64
    shrdsize = 10, // u64
    items = 11, // u64
    sub = 12, // itemref, only if dir is not empty
    // Only for .link
    ino = 13, // u64
    nlink = 14, // u32
    // Extended mode
    uid = 15, // u32
    gid = 16, // u32
    mode = 17, // u16
    mtime = 18, // u64
    _,
};
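
// Illustrative sketch, not part of the original source: each item is written
// as an indefinite-length CBOR map keyed by the small integers above. Under
// that assumption, a regular file "a" with asize 3 and dsize 512 would be
// encoded roughly as:
//   0xBF                 map, indefinite length
//   0x00 <type>          key 0 (.type), item type
//   0x01 0x41 0x61       key 1 (.name), 1-byte string "a"
//   0x03 0x03            key 3 (.asize), 3
//   0x04 0x19 0x02 0x00  key 4 (.dsize), 512
//   0xFF                 break (end of map)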

// Pessimistic upper bound on the encoded size of an item, excluding the name field.
// 2 bytes for map start/end, 11 per field (2 for the key, 9 for a full u64).
const MAX_ITEM_LEN = 2 + 11 * @typeInfo(ItemKey).@"enum".fields.len;

pub const CborMajor = enum(u3) { pos, neg, bytes, text, array, map, tag, simple };

inline fn bigu16(v: u16) [2]u8 { return @bitCast(std.mem.nativeToBig(u16, v)); }
inline fn bigu32(v: u32) [4]u8 { return @bitCast(std.mem.nativeToBig(u32, v)); }
inline fn bigu64(v: u64) [8]u8 { return @bitCast(std.mem.nativeToBig(u64, v)); }

inline fn blockHeader(id: u4, len: u28) [4]u8 { return bigu32((@as(u32, id) << 28) | len); }

inline fn cborByte(major: CborMajor, arg: u5) u8 { return (@as(u8, @intFromEnum(major)) << 5) | arg; }
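
// Worked examples for the helpers above (illustrative only):
//   blockHeader(1, 20) packs a 4-bit block id and a 28-bit length into one
//   big-endian u32: (1 << 28) | 20 == 0x10000014 -> bytes 10 00 00 14.
//   cborByte(.text, 5) == (3 << 5) | 5 == 0x65, the CBOR head byte for a
//   5-byte text string.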

// (Uncompressed) data block size.
// Start with 64k, then use increasingly larger block sizes as the export file
// grows. This is both to stay within the block number limit of the index block
// and because, with a larger index block, the reader will end up using more
// memory anyway.
fn blockSize(num: u32) usize {
    //                               block size  uncompressed data in this num range
    //              num (# mil)        # KiB        # GiB
    return main.config.export_block_size
        orelse if (num < ( 1<<20))   64<<10 //     64
        else   if (num < ( 2<<20))  128<<10 //    128
        else   if (num < ( 4<<20))  256<<10 //    512
        else   if (num < ( 8<<20))  512<<10 //   2048
        else   if (num < (16<<20)) 1024<<10 //   8192
        else                       2048<<10; // 32768
}

// Upper bound on the return value of blockSize()
// (config.export_block_size may be larger than the sizes listed above, let's
// stick with the maximum block size supported by the file format to be safe)
const MAX_BLOCK_SIZE: usize = 1<<28;


pub const Thread = struct {
    buf: []u8 = undefined,
    off: usize = MAX_BLOCK_SIZE, // pretend we have a full block to trigger a flush() for the first write
    block_num: u32 = std.math.maxInt(u32),
    itemref: u64 = 0, // ref of item currently being written

    // unused, but kept around for easy debugging
    fn compressNone(in: []const u8, out: []u8) usize {
        @memcpy(out[0..in.len], in);
        return in.len;
    }

    fn compressZstd(in: []const u8, out: []u8) usize {
        while (true) {
            const r = c.ZSTD_compress(out.ptr, out.len, in.ptr, in.len, main.config.complevel);
            if (c.ZSTD_isError(r) == 0) return r;
            ui.oom(); // That *ought* to be the only reason the above call can fail.
        }
    }

    fn createBlock(t: *Thread) std.ArrayListUnmanaged(u8) {
        var out: std.ArrayListUnmanaged(u8) = .empty;
        if (t.block_num == std.math.maxInt(u32) or t.off == 0) return out;

        out.ensureTotalCapacityPrecise(main.allocator, 12 + @as(usize, @intCast(c.ZSTD_COMPRESSBOUND(@as(c_int, @intCast(t.off)))))) catch unreachable;
        out.items.len = out.capacity;
        const bodylen = compressZstd(t.buf[0..t.off], out.items[8..]);
        out.items.len = 12 + bodylen;

        out.items[0..4].* = blockHeader(0, @intCast(out.items.len));
        out.items[4..8].* = bigu32(t.block_num);
        out.items[8+bodylen..][0..4].* = blockHeader(0, @intCast(out.items.len));
        return out;
    }

    fn flush(t: *Thread, expected_len: usize) void {
        @branchHint(.unlikely);
        var block = createBlock(t);
        defer block.deinit(main.allocator);

        global.lock.lock();
        defer global.lock.unlock();
        // This can only really happen when the root path exceeds our block size,
        // in which case we would probably have error'ed out earlier anyway.
        if (expected_len > t.buf.len) ui.die("Error writing data: path too long.\n", .{});

        if (block.items.len > 0) {
            if (global.file_off >= (1<<40)) ui.die("Export data file has grown too large, please report a bug.\n", .{});
            global.index.items[4..][t.block_num*8..][0..8].* = bigu64((global.file_off << 24) + block.items.len);
            global.file_off += block.items.len;
            global.fd.writeAll(block.items) catch |e|
                ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
        }

        t.off = 0;
        t.block_num = @intCast((global.index.items.len - 4) / 8);
        global.index.appendSlice(main.allocator, &[1]u8{0}**8) catch unreachable;
        if (global.index.items.len + 12 >= (1<<28)) ui.die("Too many data blocks, please report a bug.\n", .{});

        const newsize = blockSize(t.block_num);
        if (t.buf.len != newsize) t.buf = main.allocator.realloc(t.buf, newsize) catch unreachable;
    }

    fn cborHead(t: *Thread, major: CborMajor, arg: u64) void {
        if (arg <= 23) {
            t.buf[t.off] = cborByte(major, @intCast(arg));
            t.off += 1;
        } else if (arg <= std.math.maxInt(u8)) {
            t.buf[t.off] = cborByte(major, 24);
            t.buf[t.off+1] = @truncate(arg);
            t.off += 2;
        } else if (arg <= std.math.maxInt(u16)) {
            t.buf[t.off] = cborByte(major, 25);
            t.buf[t.off+1..][0..2].* = bigu16(@intCast(arg));
            t.off += 3;
        } else if (arg <= std.math.maxInt(u32)) {
            t.buf[t.off] = cborByte(major, 26);
            t.buf[t.off+1..][0..4].* = bigu32(@intCast(arg));
            t.off += 5;
        } else {
            t.buf[t.off] = cborByte(major, 27);
            t.buf[t.off+1..][0..8].* = bigu64(arg);
            t.off += 9;
        }
    }
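
    // Example (illustrative): cborHead(.pos, 500) takes the u16 branch and
    // writes 0x19 0x01 0xF4; cborHead(.bytes, 5) fits in the head byte
    // itself and writes just 0x45.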

    fn cborIndef(t: *Thread, major: CborMajor) void {
        t.buf[t.off] = cborByte(major, 31);
        t.off += 1;
    }

    fn itemKey(t: *Thread, key: ItemKey) void {
        t.cborHead(.pos, @intFromEnum(key));
    }

    fn itemRef(t: *Thread, key: ItemKey, ref: ?u64) void {
        const r = ref orelse return;
        t.itemKey(key);
        // Full references compress like shit and most of the references point
        // into the same block, so optimize that case by using a negative
        // offset instead.
        if ((r >> 24) == t.block_num) t.cborHead(.neg, t.itemref - r - 1)
        else t.cborHead(.pos, r);
    }
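
    // Worked example (assumed for illustration): if the item being written
    // starts at offset 0x100 of block 5 (itemref 0x05000100) and references
    // a sibling at offset 0x80 of the same block, the negative branch
    // encodes 0x100 - 0x80 - 1 == 0x7f, and the reader recovers
    // 0x100 - 0x7f - 1 == 0x80.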

    // Reserve space for a new item, write out the type, prev and name fields and return the itemref.
    fn itemStart(t: *Thread, itype: model.EType, prev_item: ?u64, name: []const u8) u64 {
        const min_len = name.len + MAX_ITEM_LEN;
        if (t.off + min_len > t.buf.len) t.flush(min_len);

        t.itemref = (@as(u64, t.block_num) << 24) | t.off;
        t.cborIndef(.map);
        t.itemKey(.type);
        if (@intFromEnum(itype) >= 0) t.cborHead(.pos, @intCast(@intFromEnum(itype)))
        else t.cborHead(.neg, @intCast(-1 - @intFromEnum(itype)));
        t.itemKey(.name);
        t.cborHead(.bytes, name.len);
        @memcpy(t.buf[t.off..][0..name.len], name);
        t.off += name.len;
        t.itemRef(.prev, prev_item);
        return t.itemref;
    }

    fn itemExt(t: *Thread, stat: *const sink.Stat) void {
        if (!main.config.extended) return;
        if (stat.ext.pack.hasuid) {
            t.itemKey(.uid);
            t.cborHead(.pos, stat.ext.uid);
        }
        if (stat.ext.pack.hasgid) {
            t.itemKey(.gid);
            t.cborHead(.pos, stat.ext.gid);
        }
        if (stat.ext.pack.hasmode) {
            t.itemKey(.mode);
            t.cborHead(.pos, stat.ext.mode);
        }
        if (stat.ext.pack.hasmtime) {
            t.itemKey(.mtime);
            t.cborHead(.pos, stat.ext.mtime);
        }
    }

    fn itemEnd(t: *Thread) void {
        t.cborIndef(.simple);
    }
};


pub const Dir = struct {
    // TODO: When items are written out into blocks depth-first, parent dirs
    // will end up getting their items distributed over many blocks, which will
    // significantly slow down reading that dir's listing. It may be worth
    // buffering some items at the Dir level before flushing them out to the
    // Thread buffer.

    // The lock protects all of the below, and is necessary because final()
    // accesses the parent dir and may be called from other threads.
    // I'm not expecting much lock contention, but it's possible to turn
    // last_item into an atomic integer and other fields could be split up for
    // subdir use.
    lock: std.Thread.Mutex = .{},
    last_sub: ?u64 = null,
    stat: sink.Stat,
    items: u64 = 0,
    size: u64 = 0,
    blocks: u64 = 0,
    err: bool = false,
    suberr: bool = false,
    shared_size: u64 = 0,
    shared_blocks: u64 = 0,
    inodes: Inodes = Inodes.init(main.allocator),

    const Inodes = std.AutoHashMap(u64, Inode);
    const Inode = struct {
        size: u64,
        blocks: u64,
        nlink: u32,
        nfound: u32,
    };


    pub fn addSpecial(d: *Dir, t: *Thread, name: []const u8, sp: model.EType) void {
        d.lock.lock();
        defer d.lock.unlock();
        d.items += 1;
        if (sp == .err) d.suberr = true;
        d.last_sub = t.itemStart(sp, d.last_sub, name);
        t.itemEnd();
    }

    pub fn addStat(d: *Dir, t: *Thread, name: []const u8, stat: *const sink.Stat) void {
        d.lock.lock();
        defer d.lock.unlock();
        d.items += 1;
        if (stat.etype != .link) {
            d.size +|= stat.size;
            d.blocks +|= stat.blocks;
        }
        d.last_sub = t.itemStart(stat.etype, d.last_sub, name);
        t.itemKey(.asize);
        t.cborHead(.pos, stat.size);
        t.itemKey(.dsize);
        t.cborHead(.pos, util.blocksToSize(stat.blocks));

        if (stat.etype == .link) {
            const lnk = d.inodes.getOrPut(stat.ino) catch unreachable;
            if (!lnk.found_existing) lnk.value_ptr.* = .{
                .size = stat.size,
                .blocks = stat.blocks,
                .nlink = stat.nlink,
                .nfound = 1,
            } else lnk.value_ptr.nfound += 1;
            t.itemKey(.ino);
            t.cborHead(.pos, stat.ino);
            t.itemKey(.nlink);
            t.cborHead(.pos, stat.nlink);
        }

        t.itemExt(stat);
        t.itemEnd();
    }

    pub fn addDir(d: *Dir, stat: *const sink.Stat) Dir {
        d.lock.lock();
        defer d.lock.unlock();
        d.items += 1;
        d.size +|= stat.size;
        d.blocks +|= stat.blocks;
        return .{ .stat = stat.* };
    }

    pub fn setReadError(d: *Dir) void {
        d.lock.lock();
        defer d.lock.unlock();
        d.err = true;
    }

    // XXX: older JSON exports did not include the nlink count and have
    // this field set to '0'. We can deal with that when importing to
    // mem_sink, but the hardlink counting algorithm used here really does need
    // that information. Current code makes sure to count such links only once
    // per dir, but does not count them towards the shared_* fields. That
    // behavior is similar to ncdu 1.x, but the difference between memory
    // import and this file export might be surprising.
    fn countLinks(d: *Dir, parent: ?*Dir) void {
        var parent_new: u32 = 0;
        var it = d.inodes.iterator();
        while (it.next()) |kv| {
            const v = kv.value_ptr;
            d.size +|= v.size;
            d.blocks +|= v.blocks;
            if (v.nlink > 1 and v.nfound < v.nlink) {
                d.shared_size +|= v.size;
                d.shared_blocks +|= v.blocks;
            }

            const p = parent orelse continue;
            // All contained in this dir, no need to keep this entry around
            if (v.nlink > 0 and v.nfound >= v.nlink) {
                p.size +|= v.size;
                p.blocks +|= v.blocks;
                _ = d.inodes.remove(kv.key_ptr.*);
            } else if (!p.inodes.contains(kv.key_ptr.*))
                parent_new += 1;
        }

        // Merge remaining inodes into parent
        const p = parent orelse return;
        if (d.inodes.count() == 0) return;

        // If parent is empty, just transfer
        if (p.inodes.count() == 0) {
            p.inodes.deinit();
            p.inodes = d.inodes;
            d.inodes = Inodes.init(main.allocator); // So we can deinit() without affecting parent

        // Otherwise, merge
        } else {
            p.inodes.ensureUnusedCapacity(parent_new) catch unreachable;
            it = d.inodes.iterator();
            while (it.next()) |kv| {
                const v = kv.value_ptr;
                const plnk = p.inodes.getOrPutAssumeCapacity(kv.key_ptr.*);
                if (!plnk.found_existing) plnk.value_ptr.* = v.*
                else plnk.value_ptr.*.nfound += v.nfound;
            }
        }
    }
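
    // Illustrative scenario, not from the original source: a file with
    // nlink == 2 whose two links both live under directory A reaches
    // nfound == 2 once A's subdirectories are merged, so its size is added
    // to A's parent directly and the inode entry is dropped. If only one
    // link is found under A, the size still counts towards A, but it is
    // also recorded in shared_size/shared_blocks and the entry is merged
    // upwards so the other link can be matched higher up the tree.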

    pub fn final(d: *Dir, t: *Thread, name: []const u8, parent: ?*Dir) void {
        if (parent) |p| p.lock.lock();
        defer if (parent) |p| p.lock.unlock();

        if (parent) |p| {
            // Different dev? Don't merge the 'inodes' sets, just count the
            // links here first so the sizes get added to the parent.
            if (p.stat.dev != d.stat.dev) d.countLinks(null);

            p.items += d.items;
            p.size +|= d.size;
            p.blocks +|= d.blocks;
            if (d.suberr or d.err) p.suberr = true;

            // Same dev, merge inodes
            if (p.stat.dev == d.stat.dev) d.countLinks(p);

            p.last_sub = t.itemStart(.dir, p.last_sub, name);
        } else {
            d.countLinks(null);
            global.root_itemref = t.itemStart(.dir, null, name);
        }
        d.inodes.deinit();

        t.itemKey(.asize);
        t.cborHead(.pos, d.stat.size);
        t.itemKey(.dsize);
        t.cborHead(.pos, util.blocksToSize(d.stat.blocks));
        if (parent == null or parent.?.stat.dev != d.stat.dev) {
            t.itemKey(.dev);
            t.cborHead(.pos, d.stat.dev);
        }
        if (d.err or d.suberr) {
            t.itemKey(.rderr);
            t.cborHead(.simple, if (d.err) 21 else 20);
        }
        t.itemKey(.cumasize);
        t.cborHead(.pos, d.size +| d.stat.size);
        t.itemKey(.cumdsize);
        t.cborHead(.pos, util.blocksToSize(d.blocks +| d.stat.blocks));
        if (d.shared_size > 0) {
            t.itemKey(.shrasize);
            t.cborHead(.pos, d.shared_size);
        }
        if (d.shared_blocks > 0) {
            t.itemKey(.shrdsize);
            t.cborHead(.pos, util.blocksToSize(d.shared_blocks));
        }
        t.itemKey(.items);
        t.cborHead(.pos, d.items);
        t.itemRef(.sub, d.last_sub);
        t.itemExt(&d.stat);
        t.itemEnd();
    }
};


pub fn createRoot(stat: *const sink.Stat, threads: []sink.Thread) Dir {
    for (threads) |*t| {
        t.sink.bin.buf = main.allocator.alloc(u8, blockSize(0)) catch unreachable;
    }

    return .{ .stat = stat.* };
}

pub fn done(threads: []sink.Thread) void {
    for (threads) |*t| {
        t.sink.bin.flush(0);
        main.allocator.free(t.sink.bin.buf);
    }

    while (std.mem.endsWith(u8, global.index.items, &[1]u8{0}**8))
        global.index.shrinkRetainingCapacity(global.index.items.len - 8);
    global.index.appendSlice(main.allocator, &bigu64(global.root_itemref)) catch unreachable;
    global.index.appendSlice(main.allocator, &blockHeader(1, @intCast(global.index.items.len + 4))) catch unreachable;
    global.index.items[0..4].* = blockHeader(1, @intCast(global.index.items.len));
    global.fd.writeAll(global.index.items) catch |e|
        ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
    global.index.clearAndFree(main.allocator);

    global.fd.close();
}

pub fn setupOutput(fd: std.fs.File) void {
    global.fd = fd;
    fd.writeAll(SIGNATURE) catch |e|
        ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
    global.file_off = 8;

    // Placeholder for the index block header.
    global.index.appendSlice(main.allocator, "aaaa") catch unreachable;
}

src/bin_reader.zig: new file (521 lines)

@@ -0,0 +1,521 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const util = @import("util.zig");
const sink = @import("sink.zig");
const ui = @import("ui.zig");
const bin_export = @import("bin_export.zig");
const c = @import("c.zig").c;


const CborMajor = bin_export.CborMajor;
const ItemKey = bin_export.ItemKey;

// Two ways to read a bin export:
//
// 1. Streaming import
//    - Read blocks sequentially, assemble items into model.Entry's and stitch
//      them together on the go.
//    - Does not use the sink.zig API, since sub-level items are read before their parent dirs.
//    - Useful when:
//      - User attempts to do a refresh or delete while browsing a file through (2)
//      - Reading from a stream
//
// 2. Random access browsing
//    - Read final block first to get the root item, then have browser.zig fetch
//      dir listings from this file.
//    - The default reader mode, requires much less memory than (1) and provides
//      a snappier first-browsing experience.
//
// The approach from (2) can also be used to walk through the entire directory
// tree and stream it to sink.zig (either for importing or converting to JSON).
// That would allow for better code reuse and low-memory conversion, but
// performance will not be as good as a direct streaming read. Needs
// benchmarks.
//
// This file only implements (2) at the moment.

pub const global = struct {
    var fd: std.fs.File = undefined;
    var index: []u8 = undefined;
    var blocks: [8]Block = [1]Block{.{}}**8;
    var counter: u64 = 0;

    // Last itemref being read/parsed. This is a hack to provide *some* context on error.
    // Providing more context mainly just bloats the binary and decreases
    // performance for fairly little benefit. Nobody's going to debug a corrupted export.
    var lastitem: ?u64 = null;
};


const Block = struct {
    num: u32 = std.math.maxInt(u32),
    last: u64 = 0,
    data: []u8 = undefined,
};


inline fn bigu16(v: [2]u8) u16 { return std.mem.bigToNative(u16, @bitCast(v)); }
inline fn bigu32(v: [4]u8) u32 { return std.mem.bigToNative(u32, @bitCast(v)); }
inline fn bigu64(v: [8]u8) u64 { return std.mem.bigToNative(u64, @bitCast(v)); }

fn die() noreturn {
    @branchHint(.cold);
    if (global.lastitem) |e| ui.die("Error reading item {x} from file\n", .{e})
    else ui.die("Error reading from file\n", .{});
}


fn readBlock(num: u32) []const u8 {
    // Simple linear search, only suitable if we keep the number of in-memory blocks small.
    var block: *Block = &global.blocks[0];
    for (&global.blocks) |*b| {
        if (b.num == num) {
            if (b.last != global.counter) {
                global.counter += 1;
                b.last = global.counter;
            }
            return b.data;
        }
        if (block.last > b.last) block = b;
    }
    if (block.num != std.math.maxInt(u32))
        main.allocator.free(block.data);
    block.num = num;
    global.counter += 1;
    block.last = global.counter;

    if (num > global.index.len/8 - 1) die();
    const offlen = bigu64(global.index[num*8..][0..8].*);
    const off = offlen >> 24;
    const len = offlen & 0xffffff;
    if (len <= 12) die();

    // Only read the compressed data part, assume block header, number and footer are correct.
    const buf = main.allocator.alloc(u8, @intCast(len - 12)) catch unreachable;
    defer main.allocator.free(buf);
    const rdlen = global.fd.preadAll(buf, off + 8)
        catch |e| ui.die("Error reading from file: {s}\n", .{ui.errorString(e)});
    if (rdlen != buf.len) die();

    const rawlen = c.ZSTD_getFrameContentSize(buf.ptr, buf.len);
    if (rawlen <= 0 or rawlen >= (1<<24)) die();
    block.data = main.allocator.alloc(u8, @intCast(rawlen)) catch unreachable;

    const res = c.ZSTD_decompress(block.data.ptr, block.data.len, buf.ptr, buf.len);
    if (res != block.data.len) ui.die("Error decompressing block {} (expected {} got {})\n", .{ num, block.data.len, res });

    return block.data;
}
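
// Illustrative decoding of an index entry (derived from the writer in
// bin_export.zig): each entry stores (file_off << 24) | block_len, so a
// block written at file offset 8 with a total length of 100 bytes is stored
// as the u64 0x0000000008000064; readBlock() splits that back into off == 8
// and len == 100. The eight-entry `blocks` array acts as a tiny LRU cache:
// the loop above evicts the slot with the smallest `last` counter.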


const CborReader = struct {
    buf: []const u8,

    fn head(r: *CborReader) CborVal {
        if (r.buf.len < 1) die();
        var v = CborVal{
            .rd = r,
            .major = @enumFromInt(r.buf[0] >> 5),
            .indef = false,
            .arg = 0,
        };
        switch (r.buf[0] & 0x1f) {
            0x00...0x17 => |n| {
                v.arg = n;
                r.buf = r.buf[1..];
            },
            0x18 => {
                if (r.buf.len < 2) die();
                v.arg = r.buf[1];
                r.buf = r.buf[2..];
            },
            0x19 => {
                if (r.buf.len < 3) die();
                v.arg = bigu16(r.buf[1..3].*);
                r.buf = r.buf[3..];
            },
            0x1a => {
                if (r.buf.len < 5) die();
                v.arg = bigu32(r.buf[1..5].*);
                r.buf = r.buf[5..];
            },
            0x1b => {
                if (r.buf.len < 9) die();
                v.arg = bigu64(r.buf[1..9].*);
                r.buf = r.buf[9..];
            },
            0x1f => switch (v.major) {
                .bytes, .text, .array, .map, .simple => {
                    v.indef = true;
                    r.buf = r.buf[1..];
                },
                else => die(),
            },
            else => die(),
        }
        return v;
    }

    // Read the next CBOR value, skipping any tags
    fn next(r: *CborReader) CborVal {
        while (true) {
            const v = r.head();
            if (v.major != .tag) return v;
        }
    }
};

const CborVal = struct {
    rd: *CborReader,
    major: CborMajor,
    indef: bool,
    arg: u64,

    fn end(v: *const CborVal) bool {
        return v.major == .simple and v.indef;
    }

    fn int(v: *const CborVal, T: type) T {
        switch (v.major) {
            .pos => return std.math.cast(T, v.arg) orelse die(),
            .neg => {
                if (std.math.minInt(T) == 0) die();
                if (v.arg > std.math.maxInt(T)) die();
                return -@as(T, @intCast(v.arg)) + (-1);
            },
            else => die(),
        }
    }

    fn isTrue(v: *const CborVal) bool {
        return v.major == .simple and v.arg == 21;
    }

    // Read either a byte or text string.
    // Doesn't validate UTF-8 strings, doesn't support indefinite-length strings.
    fn bytes(v: *const CborVal) []const u8 {
        if (v.indef or (v.major != .bytes and v.major != .text)) die();
        if (v.rd.buf.len < v.arg) die();
        defer v.rd.buf = v.rd.buf[@intCast(v.arg)..];
        return v.rd.buf[0..@intCast(v.arg)];
    }

    // Skip current value.
    fn skip(v: *const CborVal) void {
        // indefinite-length bytes, text, array or map; skip till break marker.
        if (v.major != .simple and v.indef) {
            while (true) {
                const n = v.rd.next();
                if (n.end()) return;
                n.skip();
            }
        }
        switch (v.major) {
            .bytes, .text => {
                if (v.rd.buf.len < v.arg) die();
                v.rd.buf = v.rd.buf[@intCast(v.arg)..];
            },
            .array => {
                if (v.arg > (1<<24)) die();
                for (0..@intCast(v.arg)) |_| v.rd.next().skip();
            },
            .map => {
                if (v.arg > (1<<24)) die();
                for (0..@intCast(v.arg*|2)) |_| v.rd.next().skip();
            },
            else => {},
        }
    }

    fn etype(v: *const CborVal) model.EType {
        const n = v.int(i32);
        return std.meta.intToEnum(model.EType, n)
            catch if (n < 0) .pattern else .nonreg;
    }

    fn itemref(v: *const CborVal, cur: u64) u64 {
        if (v.major == .pos) return v.arg;
        if (v.major == .neg) {
            if (v.arg >= (cur & 0xffffff)) die();
            return cur - v.arg - 1;
        }
        return die();
    }
};


test "CBOR int parsing" {
    inline for (.{
        .{ .in = "\x00", .t = u1, .exp = 0 },
        .{ .in = "\x01", .t = u1, .exp = 1 },
        .{ .in = "\x18\x18", .t = u8, .exp = 0x18 },
        .{ .in = "\x18\xff", .t = u8, .exp = 0xff },
        .{ .in = "\x19\x07\xff", .t = u64, .exp = 0x7ff },
        .{ .in = "\x19\xff\xff", .t = u64, .exp = 0xffff },
        .{ .in = "\x1a\x00\x01\x00\x00", .t = u64, .exp = 0x10000 },
        .{ .in = "\x1b\x7f\xff\xff\xff\xff\xff\xff\xff", .t = i64, .exp = std.math.maxInt(i64) },
        .{ .in = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff", .t = u64, .exp = std.math.maxInt(u64) },
        .{ .in = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff", .t = i65, .exp = std.math.maxInt(u64) },
        .{ .in = "\x20", .t = i1, .exp = -1 },
        .{ .in = "\x38\x18", .t = i8, .exp = -0x19 },
        .{ .in = "\x39\x01\xf3", .t = i16, .exp = -500 },
        .{ .in = "\x3a\xfe\xdc\xba\x97", .t = i33, .exp = -0xfedc_ba98 },
        .{ .in = "\x3b\x7f\xff\xff\xff\xff\xff\xff\xff", .t = i64, .exp = std.math.minInt(i64) },
        .{ .in = "\x3b\xff\xff\xff\xff\xff\xff\xff\xff", .t = i65, .exp = std.math.minInt(i65) },
    }) |t| {
        var r = CborReader{.buf = t.in};
        try std.testing.expectEqual(@as(t.t, t.exp), r.next().int(t.t));
        try std.testing.expectEqual(0, r.buf.len);
    }
}

test "CBOR string parsing" {
    var r = CborReader{.buf="\x40"};
    try std.testing.expectEqualStrings("", r.next().bytes());
    r.buf = "\x45\x00\x01\x02\x03\x04x";
    try std.testing.expectEqualStrings("\x00\x01\x02\x03\x04", r.next().bytes());
    try std.testing.expectEqualStrings("x", r.buf);
    r.buf = "\x78\x241234567890abcdefghijklmnopqrstuvwxyz-end";
    try std.testing.expectEqualStrings("1234567890abcdefghijklmnopqrstuvwxyz", r.next().bytes());
    try std.testing.expectEqualStrings("-end", r.buf);
}

test "CBOR skip parsing" {
    inline for (.{
        "\x00",
        "\x40",
        "\x41a",
        "\x5f\xff",
        "\x5f\x41a\xff",
        "\x80",
        "\x81\x00",
        "\x9f\xff",
        "\x9f\x9f\xff\xff",
        "\x9f\x9f\x81\x00\xff\xff",
        "\xa0",
        "\xa1\x00\x01",
        "\xbf\xff",
        "\xbf\xc0\x00\x9f\xff\xff",
    }) |s| {
        var r = CborReader{.buf = s ++ "garbage"};
        r.next().skip();
        try std.testing.expectEqualStrings(r.buf, "garbage");
    }
}

const ItemParser = struct {
    r: CborReader,
    len: ?u64 = null,

    const Field = struct {
        key: ItemKey,
        val: CborVal,
    };

    fn init(buf: []const u8) ItemParser {
        var r = ItemParser{.r = .{.buf = buf}};
        const head = r.r.next();
        if (head.major != .map) die();
        if (!head.indef) r.len = head.arg;
        return r;
    }

    fn key(r: *ItemParser) ?CborVal {
        if (r.len) |*l| {
            if (l.* == 0) return null;
            l.* -= 1;
            return r.r.next();
        } else {
            const v = r.r.next();
            return if (v.end()) null else v;
        }
    }

    // Skips over any fields that don't fit into an ItemKey.
    fn next(r: *ItemParser) ?Field {
        while (r.key()) |k| {
            if (k.major == .pos and k.arg <= std.math.maxInt(@typeInfo(ItemKey).@"enum".tag_type)) return .{
                .key = @enumFromInt(k.arg),
                .val = r.r.next(),
            } else {
                k.skip();
                r.r.next().skip();
            }
        }
        return null;
    }
};

// Returned buffer is valid until the next readItem().
fn readItem(ref: u64) ItemParser {
    global.lastitem = ref;
    if (ref >= (1 << (24 + 32))) die();
    const block = readBlock(@intCast(ref >> 24));
    if ((ref & 0xffffff) >= block.len) die();
    return ItemParser.init(block[@intCast(ref & 0xffffff)..]);
}
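
// Itemref layout sketch (illustrative, mirroring the writer in
// bin_export.zig): ref == (block_num << 24) | offset_in_block, so e.g.
// readItem(0x03000100) parses the item starting at byte offset 0x100 of
// block 3.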

const Import = struct {
    sink: *sink.Thread,
    stat: sink.Stat = .{},
    fields: Fields = .{},
    p: ItemParser = undefined,

    const Fields = struct {
        name: []const u8 = "",
        rderr: bool = false,
        prev: ?u64 = null,
        sub: ?u64 = null,
    };

    fn readFields(ctx: *Import, ref: u64) void {
        ctx.p = readItem(ref);
        var hastype = false;

        while (ctx.p.next()) |kv| switch (kv.key) {
            .type => {
                ctx.stat.etype = kv.val.etype();
                hastype = true;
            },
            .name => ctx.fields.name = kv.val.bytes(),
            .prev => ctx.fields.prev = kv.val.itemref(ref),
            .asize => ctx.stat.size = kv.val.int(u64),
            .dsize => ctx.stat.blocks = @intCast(kv.val.int(u64)/512),
            .dev => ctx.stat.dev = kv.val.int(u64),
            .rderr => ctx.fields.rderr = kv.val.isTrue(),
            .sub => ctx.fields.sub = kv.val.itemref(ref),
            .ino => ctx.stat.ino = kv.val.int(u64),
            .nlink => ctx.stat.nlink = kv.val.int(u31),
            .uid => { ctx.stat.ext.uid = kv.val.int(u32); ctx.stat.ext.pack.hasuid = true; },
            .gid => { ctx.stat.ext.gid = kv.val.int(u32); ctx.stat.ext.pack.hasgid = true; },
            .mode => { ctx.stat.ext.mode = kv.val.int(u16); ctx.stat.ext.pack.hasmode = true; },
            .mtime => { ctx.stat.ext.mtime = kv.val.int(u64); ctx.stat.ext.pack.hasmtime = true; },
            else => kv.val.skip(),
        };

        if (!hastype) die();
        if (ctx.fields.name.len == 0) die();
    }

    fn import(ctx: *Import, ref: u64, parent: ?*sink.Dir, dev: u64) void {
        ctx.stat = .{ .dev = dev };
        ctx.fields = .{};
        ctx.readFields(ref);

        if (ctx.stat.etype == .dir) {
            const prev = ctx.fields.prev;
            const dir =
                if (parent) |d| d.addDir(ctx.sink, ctx.fields.name, &ctx.stat)
                else sink.createRoot(ctx.fields.name, &ctx.stat);
            ctx.sink.setDir(dir);
            if (ctx.fields.rderr) dir.setReadError(ctx.sink);

            ctx.fields.prev = ctx.fields.sub;
            while (ctx.fields.prev) |n| ctx.import(n, dir, ctx.stat.dev);

            ctx.sink.setDir(parent);
            dir.unref(ctx.sink);
            ctx.fields.prev = prev;

        } else {
            const p = parent orelse die();
            if (@intFromEnum(ctx.stat.etype) < 0)
                p.addSpecial(ctx.sink, ctx.fields.name, ctx.stat.etype)
            else
                p.addStat(ctx.sink, ctx.fields.name, &ctx.stat);
        }

        if ((ctx.sink.files_seen.load(.monotonic) & 65) == 0)
            main.handleEvent(false, false);
    }
};

// Resolve an itemref and return a newly allocated entry.
// Dir.parent and Link.next/prev are left uninitialized.
pub fn get(ref: u64, alloc: std.mem.Allocator) *model.Entry {
    const parser = readItem(ref);

    var etype: ?model.EType = null;
    var name: []const u8 = "";
    var p = parser;
    var ext = model.Ext{};
    while (p.next()) |kv| {
        switch (kv.key) {
            .type => etype = kv.val.etype(),
            .name => name = kv.val.bytes(),
            .uid => { ext.uid = kv.val.int(u32); ext.pack.hasuid = true; },
            .gid => { ext.gid = kv.val.int(u32); ext.pack.hasgid = true; },
            .mode => { ext.mode = kv.val.int(u16); ext.pack.hasmode = true; },
            .mtime => { ext.mtime = kv.val.int(u64); ext.pack.hasmtime = true; },
            else => kv.val.skip(),
        }
    }
    if (etype == null or name.len == 0) die();

    var entry = model.Entry.create(alloc, etype.?, main.config.extended and !ext.isEmpty(), name);
    entry.next = .{ .ref = std.math.maxInt(u64) };
    if (entry.ext()) |e| e.* = ext;
    if (entry.dir()) |d| d.sub = .{ .ref = std.math.maxInt(u64) };
    p = parser;
    while (p.next()) |kv| switch (kv.key) {
        .prev => entry.next = .{ .ref = kv.val.itemref(ref) },
        .asize => { if (entry.pack.etype != .dir) entry.size = kv.val.int(u64); },
        .dsize => { if (entry.pack.etype != .dir) entry.pack.blocks = @intCast(kv.val.int(u64)/512); },

        .rderr => { if (entry.dir()) |d| {
            if (kv.val.isTrue()) d.pack.err = true
            else d.pack.suberr = true;
        } },
        .dev => { if (entry.dir()) |d| d.pack.dev = model.devices.getId(kv.val.int(u64)); },
        .cumasize => entry.size = kv.val.int(u64),
        .cumdsize => entry.pack.blocks = @intCast(kv.val.int(u64)/512),
        .shrasize => { if (entry.dir()) |d| d.shared_size = kv.val.int(u64); },
        .shrdsize => { if (entry.dir()) |d| d.shared_blocks = kv.val.int(u64)/512; },
        .items => { if (entry.dir()) |d| d.items = util.castClamp(u32, kv.val.int(u64)); },
        .sub => { if (entry.dir()) |d| d.sub = .{ .ref = kv.val.itemref(ref) }; },

        .ino => { if (entry.link()) |l| l.ino = kv.val.int(u64); },
        .nlink => { if (entry.link()) |l| l.pack.nlink = kv.val.int(u31); },
        else => kv.val.skip(),
    };
    return entry;
}
|
||||
|
||||
pub fn getRoot() u64 {
|
||||
return bigu64(global.index[global.index.len-8..][0..8].*);
|
||||
}
|
||||
|
||||
// Walk through the directory tree in depth-first order and pass results to sink.zig.
|
||||
// Depth-first is required for JSON export, but more efficient strategies are
|
||||
// possible for other sinks. Parallel import is also an option, but that's more
|
||||
// complex and likely less efficient than a streaming import.
|
||||
pub fn import() void {
|
||||
const sink_threads = sink.createThreads(1);
|
||||
var ctx = Import{.sink = &sink_threads[0]};
|
||||
ctx.import(getRoot(), null, 0);
|
||||
sink.done();
|
||||
}
|
||||
|
||||
// Assumes that the file signature has already been read and validated.
|
||||
pub fn open(fd: std.fs.File) !void {
|
||||
global.fd = fd;
|
||||
|
||||
// Do not use fd.getEndPos() because that requires newer kernels supporting statx() #261.
|
||||
try fd.seekFromEnd(0);
|
||||
const size = try fd.getPos();
|
||||
if (size < 16) return error.EndOfStream;
|
||||
|
||||
// Read index block
|
||||
var buf: [4]u8 = undefined;
|
||||
if (try fd.preadAll(&buf, size - 4) != 4) return error.EndOfStream;
|
||||
const index_header = bigu32(buf);
|
||||
if ((index_header >> 28) != 1 or (index_header & 7) != 0) die();
|
||||
const len = (index_header & 0x0fffffff) - 8; // excluding block header & footer
|
||||
if (len >= size) die();
|
||||
global.index = main.allocator.alloc(u8, len) catch unreachable;
|
||||
if (try fd.preadAll(global.index, size - len - 4) != global.index.len) return error.EndOfStream;
|
||||
}
|
||||
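A hedged sketch of the index-footer layout that open() validates above: the last four bytes of an export are a big-endian u32 whose top four bits hold the block type (1 for the index block) and whose low 28 bits hold the block length, eight bytes of which are header/footer overhead. The function and error names below are illustrative only.

const std = @import("std");

fn parseIndexFooter(footer: [4]u8) !u32 {
    const word = std.mem.readInt(u32, &footer, .big); // equivalent of bigu32() above
    if ((word >> 28) != 1 or (word & 7) != 0) return error.BadIndexBlock;
    return (word & 0x0fffffff) - 8; // length of the index payload
}

test "index footer" {
    // type 1, block length 24 -> 16 bytes of payload
    try std.testing.expectEqual(@as(u32, 16), try parseIndexFooter(.{ 0x10, 0, 0, 24 }));
}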
src/browser.zig (348 lines changed)

@@ -4,22 +4,32 @@
const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const scan = @import("scan.zig");
const sink = @import("sink.zig");
const mem_sink = @import("mem_sink.zig");
const bin_reader = @import("bin_reader.zig");
const delete = @import("delete.zig");
const ui = @import("ui.zig");
const c = @cImport(@cInclude("time.h"));
const c = @import("c.zig").c;
const util = @import("util.zig");

// Currently opened directory.
pub var dir_parent: *model.Dir = undefined;
pub var dir_path: [:0]u8 = undefined;
var dir_parents: std.ArrayListUnmanaged(model.Ref) = .empty;
var dir_alloc = std.heap.ArenaAllocator.init(main.allocator);

// Used to keep track of which dir is which ref, so we can enter it.
// Only used for binreader browsing.
var dir_refs: std.ArrayListUnmanaged(struct { ptr: *model.Dir, ref: u64 }) = .empty;

// Sorted list of all items in the currently opened directory.
// (first item may be null to indicate the "parent directory" item)
var dir_items = std.ArrayList(?*model.Entry).init(main.allocator);
var dir_items: std.ArrayListUnmanaged(?*model.Entry) = .empty;

var dir_max_blocks: u64 = 0;
var dir_max_size: u64 = 0;
var dir_has_shared: bool = false;
var dir_loading: u64 = 0;

// Index into dir_items that is currently selected.
var cursor_idx: usize = 0;

@@ -32,28 +42,28 @@ const View = struct {

// The hash(name) of the selected entry (cursor), this is used to derive
// cursor_idx after sorting or changing directory.
// (collisions may cause the wrong entry to be selected, but dealing with
// string allocations sucks and I expect collisions to be rare enough)
cursor_hash: u64 = 0,

fn hashEntry(entry: ?*model.Entry) u64 {
return if (entry) |e| std.hash.Wyhash.hash(0, e.name()) else 0;
fn dirHash() u64 {
return std.hash.Wyhash.hash(0, dir_path);
}

// Update cursor_hash and save the current view to the hash table.
fn save(self: *@This()) void {
self.cursor_hash = if (dir_items.items.len == 0) 0
else hashEntry(dir_items.items[cursor_idx]);
opened_dir_views.put(@intFromPtr(dir_parent), self.*) catch {};
else if (dir_items.items[cursor_idx]) |e| e.nameHash()
else 0;
opened_dir_views.put(dirHash(), self.*) catch {};
}

// Should be called after dir_parent or dir_items has changed, will load the last saved view and find the proper cursor_idx.
fn load(self: *@This(), sel: ?*const model.Entry) void {
if (opened_dir_views.get(@intFromPtr(dir_parent))) |v| self.* = v
fn load(self: *@This(), sel: u64) void {
if (opened_dir_views.get(dirHash())) |v| self.* = v
else self.* = @This(){};
cursor_idx = 0;
for (dir_items.items, 0..) |e, i| {
if (if (sel != null) e == sel else self.cursor_hash == hashEntry(e)) {
const h = if (e) |x| x.nameHash() else 0;
if (if (sel != 0) h == sel else self.cursor_hash == h) {
cursor_idx = i;
break;
}
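// Illustrative sketch (not part of the diff): the cursor is tracked by a
// 64-bit Wyhash of the selected name rather than by an entry pointer, so it
// can be re-found after the item list is rebuilt and old pointers have gone
// stale. Assumes `std` is in scope as above.
test "a name hash survives a reload" {
    const before = std.hash.Wyhash.hash(0, "some_dir");
    // ... the directory listing is reloaded, entry pointers are invalidated ...
    const after = std.hash.Wyhash.hash(0, "some_dir");
    try std.testing.expectEqual(before, after);
}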
@@ -64,10 +74,8 @@ const View = struct {
var current_view = View{};

// Directories the user has browsed to before, and which item was last selected.
// The key is the @intFromPtr() of the opened *Dir; An int because the pointer
// itself may have gone stale after deletion or refreshing. They're only for
// lookups, not dereferencing.
var opened_dir_views = std.AutoHashMap(usize, View).init(main.allocator);
// The key is the hash of dir_path;
var opened_dir_views = std.AutoHashMap(u64, View).init(main.allocator);

fn sortIntLt(a: anytype, b: @TypeOf(a)) ?bool {
return if (a == b) null else if (main.config.sort_order == .asc) a < b else a > b;

@@ -77,8 +85,8 @@ fn sortLt(_: void, ap: ?*model.Entry, bp: ?*model.Entry) bool {
const a = ap.?;
const b = bp.?;

if (main.config.sort_dirsfirst and a.isDirectory() != b.isDirectory())
return a.isDirectory();
if (main.config.sort_dirsfirst and a.pack.etype.isDirectory() != b.pack.etype.isDirectory())
return a.pack.etype.isDirectory();

switch (main.config.sort_col) {
.name => {}, // name sorting is the fallback

@@ -113,7 +121,7 @@ fn sortLt(_: void, ap: ?*model.Entry, bp: ?*model.Entry) bool {
// - config.sort_* changes
// - dir_items changes (i.e. from loadDir())
// - files in this dir have changed in a way that affects their ordering
fn sortDir(next_sel: ?*const model.Entry) void {
fn sortDir(next_sel: u64) void {
// No need to sort the first item if that's the parent dir reference,
// excluding that allows sortLt() to ignore null values.
const lst = dir_items.items[(if (dir_items.items.len > 0 and dir_items.items[0] == null) @as(usize, 1) else 0)..];

@@ -125,31 +133,103 @@ fn sortDir(next_sel: ?*const model.Entry) void {
// - dir_parent changes (i.e. we change directory)
// - config.show_hidden changes
// - files in this dir have been added or removed
pub fn loadDir(next_sel: ?*const model.Entry) void {
pub fn loadDir(next_sel: u64) void {
// XXX: The current dir listing is wiped before loading the new one, which
// causes the screen to flicker a bit when the loading indicator is drawn.
// Should we keep the old listing around?
main.event_delay_timer.reset();
_ = dir_alloc.reset(.free_all);
dir_items.shrinkRetainingCapacity(0);
dir_refs.shrinkRetainingCapacity(0);
dir_max_size = 1;
dir_max_blocks = 1;
dir_has_shared = false;

if (dir_parent != model.root)
dir_items.append(null) catch unreachable;
var it = dir_parent.sub;
while (it) |e| : (it = e.next) {
if (dir_parents.items.len > 1)
dir_items.append(main.allocator, null) catch unreachable;
var ref = dir_parent.sub;
while (!ref.isNull()) {
const e =
if (main.config.binreader) bin_reader.get(ref.ref, dir_alloc.allocator())
else ref.ptr.?;

if (e.pack.blocks > dir_max_blocks) dir_max_blocks = e.pack.blocks;
if (e.size > dir_max_size) dir_max_size = e.size;
const shown = main.config.show_hidden or blk: {
const excl = if (e.file()) |f| f.pack.excluded else false;
const excl = switch (e.pack.etype) {
.pattern, .otherfs, .kernfs => true,
else => false,
};
const name = e.name();
break :blk !excl and name[0] != '.' and name[name.len-1] != '~';
};
if (shown) {
dir_items.append(e) catch unreachable;
if (e.dir()) |d| if (d.shared_blocks > 0 or d.shared_size > 0) { dir_has_shared = true; };
dir_items.append(main.allocator, e) catch unreachable;
if (e.dir()) |d| {
if (d.shared_blocks > 0 or d.shared_size > 0) dir_has_shared = true;
if (main.config.binreader) dir_refs.append(main.allocator, .{ .ptr = d, .ref = ref.ref }) catch unreachable;
}
}

ref = e.next;
dir_loading += 1;
if ((dir_loading & 65) == 0)
main.handleEvent(false, false);
}
sortDir(next_sel);
dir_loading = 0;
}


pub fn initRoot() void {
if (main.config.binreader) {
const ref = bin_reader.getRoot();
dir_parent = bin_reader.get(ref, main.allocator).dir() orelse ui.die("Invalid import\n", .{});
dir_parents.append(main.allocator, .{ .ref = ref }) catch unreachable;
} else {
dir_parent = model.root;
dir_parents.append(main.allocator, .{ .ptr = &dir_parent.entry }) catch unreachable;
}
dir_path = main.allocator.dupeZ(u8, dir_parent.entry.name()) catch unreachable;
loadDir(0);
}

fn enterSub(e: *model.Dir) void {
if (main.config.binreader) {
const ref = blk: {
for (dir_refs.items) |r| if (r.ptr == e) break :blk r.ref;
return;
};
dir_parent.entry.destroy(main.allocator);
dir_parent = bin_reader.get(ref, main.allocator).dir() orelse unreachable;
dir_parents.append(main.allocator, .{ .ref = ref }) catch unreachable;
} else {
dir_parent = e;
dir_parents.append(main.allocator, .{ .ptr = &e.entry }) catch unreachable;
}

const newpath = std.fs.path.joinZ(main.allocator, &[_][]const u8{ dir_path, e.entry.name() }) catch unreachable;
main.allocator.free(dir_path);
dir_path = newpath;
}

fn enterParent() void {
std.debug.assert(dir_parents.items.len > 1);

_ = dir_parents.pop();
const p = dir_parents.items[dir_parents.items.len-1];
if (main.config.binreader) {
dir_parent.entry.destroy(main.allocator);
dir_parent = bin_reader.get(p.ref, main.allocator).dir() orelse unreachable;
} else
dir_parent = p.ptr.?.dir() orelse unreachable;

const newpath = main.allocator.dupeZ(u8, std.fs.path.dirname(dir_path) orelse unreachable) catch unreachable;
main.allocator.free(dir_path);
dir_path = newpath;
}


const Row = struct {
row: u32,
col: u32 = 0,

@@ -161,19 +241,17 @@ const Row = struct {
fn flag(self: *Self) void {
defer self.col += 2;
const item = self.item orelse return;
const ch: u7 = ch: {
if (item.file()) |f| {
if (f.pack.err) break :ch '!';
if (f.pack.excluded) break :ch '<';
if (f.pack.other_fs) break :ch '>';
if (f.pack.kernfs) break :ch '^';
if (f.pack.notreg) break :ch '@';
} else if (item.dir()) |d| {
if (d.pack.err) break :ch '!';
if (d.pack.suberr) break :ch '.';
if (d.sub == null) break :ch 'e';
} else if (item.link()) |_| break :ch 'H';
return;
const ch: u7 = switch (item.pack.etype) {
.dir => if (item.dir().?.pack.err) '!'
else if (item.dir().?.pack.suberr) '.'
else if (item.dir().?.sub.isNull()) 'e'
else return,
.link => 'H',
.pattern => '<',
.otherfs => '>',
.kernfs => '^',
.nonreg => '@',
else => return,
};
ui.move(self.row, self.col);
self.bg.fg(.flag);

@@ -214,10 +292,13 @@ const Row = struct {
ui.addch('[');
if (main.config.show_percent) {
self.bg.fg(.num);
ui.addprint("{d:>5.1}", .{ 100 *
if (main.config.show_blocks) @as(f32, @floatFromInt(item.pack.blocks)) / @as(f32, @floatFromInt(@max(1, dir_parent.entry.pack.blocks)))
else @as(f32, @floatFromInt(item.size)) / @as(f32, @floatFromInt(@max(1, dir_parent.entry.size)))
});
var num : u64 = if (main.config.show_blocks) item.pack.blocks else item.size;
var denom : u64 = if (main.config.show_blocks) dir_parent.entry.pack.blocks else dir_parent.entry.size;
if (num > (1<<54)) { // avoid overflow
num >>= 10;
denom >>= 10;
}
ui.addstr(&util.fmt5dec(@intCast( @min(1000, (num * 1000 + (denom / 2)) / @max(1, denom) ))));
self.bg.fg(.default);
ui.addch('%');
}
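// Illustrative sketch of the overflow guard above (not part of the diff):
// num * 1000 can exceed u64 once num approaches 2^54, so both operands are
// scaled down first; the rounded permille result is unaffected.
fn permille(num_in: u64, denom_in: u64) u64 {
    var num = num_in;
    var denom = denom_in;
    if (num > (1 << 54)) { // same guard as above
        num >>= 10;
        denom >>= 10;
    }
    return @min(1000, (num * 1000 + (denom / 2)) / @max(1, denom));
}

test "permille is preserved by the downscaling" {
    try std.testing.expectEqual(@as(u64, 500), permille(1, 2));
    try std.testing.expectEqual(@as(u64, 500), permille(1 << 60, 1 << 61));
}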
@@ -259,12 +340,12 @@ const Row = struct {
ui.addnum(self.bg, n);
} else if (n < 100_000)
ui.addnum(self.bg, n)
else if (n < 1000_000) {
ui.addprint("{d:>5.1}", .{ @as(f32, @floatFromInt(n)) / 1000 });
else if (n < 999_950) {
ui.addstr(&util.fmt5dec(@intCast( (n + 50) / 100 )));
self.bg.fg(.default);
ui.addch('k');
} else if (n < 1000_000_000) {
ui.addprint("{d:>5.1}", .{ @as(f32, @floatFromInt(n)) / 1000_000 });
} else if (n < 999_950_000) {
ui.addstr(&util.fmt5dec(@intCast( (n + 50_000) / 100_000 )));
self.bg.fg(.default);
ui.addch('M');
} else {
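// Why the cutoffs above are 999_950 and 999_950_000 rather than round
// millions: values are rounded to one decimal before display, so anything
// from 999_950 upwards would render as "1000.0k"; the stricter bound pushes
// those values into the next unit instead. (Standalone check, not from the
// ncdu tree.)
test "rounding cutoff" {
    try std.testing.expectEqual(@as(u64, 9999), (@as(u64, 999_949) + 50) / 100); // fits "999.9"
    try std.testing.expectEqual(@as(u64, 10000), (@as(u64, 999_950) + 50) / 100); // would need "1000.0"
}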
@@ -281,16 +362,21 @@ const Row = struct {
if (!main.config.show_mtime or self.col + 37 > ui.cols) return;
defer self.col += 27;
ui.move(self.row, self.col+1);
const ext = (if (self.item) |e| e.ext() else @as(?*model.Ext, null)) orelse dir_parent.entry.ext();
if (ext) |e| ui.addts(self.bg, e.mtime)
else ui.addstr(" no mtime");
const ext = if (self.item) |e| e.ext() else dir_parent.entry.ext();
if (ext) |e| {
if (e.pack.hasmtime) {
ui.addts(self.bg, e.mtime);
return;
}
}
ui.addstr(" no mtime");
}

fn name(self: *Self) void {
ui.move(self.row, self.col);
if (self.item) |i| {
self.bg.fg(if (i.pack.etype == .dir) .dir else .default);
ui.addch(if (i.isDirectory()) '/' else ' ');
ui.addch(if (i.pack.etype.isDirectory()) '/' else ' ');
ui.addstr(ui.shorten(ui.toUtf8(i.name()), ui.cols -| self.col -| 1));
} else {
self.bg.fg(.dir);

@@ -314,7 +400,7 @@ const Row = struct {
};

var state: enum { main, quit, help, info } = .main;
var message: ?[:0]const u8 = null;
var message: ?[]const [:0]const u8 = null;

const quit = struct {
fn draw() void {

@@ -344,7 +430,7 @@ const info = struct {

var tab: Tab = .info;
var entry: ?*model.Entry = null;
var links: ?std.ArrayList(*model.Link) = null;
var links: ?std.ArrayListUnmanaged(*model.Link) = null;
var links_top: usize = 0;
var links_idx: usize = 0;

@@ -359,7 +445,7 @@ const info = struct {
// Set the displayed entry to the currently selected item and open the tab.
fn set(e: ?*model.Entry, t: Tab) void {
if (e != entry) {
if (links) |*l| l.deinit();
if (links) |*l| l.deinit(main.allocator);
links = null;
links_top = 0;
links_idx = 0;

@@ -371,25 +457,27 @@ const info = struct {
}
state = .info;
tab = t;
if (tab == .links and links == null) {
var list = std.ArrayList(*model.Link).init(main.allocator);
if (tab == .links and links == null and !main.config.binreader) {
var list: std.ArrayListUnmanaged(*model.Link) = .empty;
var l = e.?.link().?;
while (true) {
list.append(l) catch unreachable;
list.append(main.allocator, l) catch unreachable;
l = l.next;
if (&l.entry == e)
break;
}
// TODO: Zig's sort() implementation is type-generic and not very
// small. I suspect we can get a good save on our binary size by using
// a smaller or non-generic sort. This doesn't have to be very fast.
std.mem.sort(*model.Link, list.items, {}, lt);
std.sort.heap(*model.Link, list.items, {}, lt);
for (list.items, 0..) |n,i| if (&n.entry == e) { links_idx = i; };
links = list;
}
}

fn drawLinks(box: ui.Box, row: *u32, rows: u32, cols: u32) void {
if (main.config.binreader) {
box.move(2, 2);
ui.addstr("This feature is not available when reading from file.");
return;
}
const numrows = rows -| 4;
if (links_idx < links_top) links_top = links_idx;
if (links_idx >= links_top + numrows) links_top = links_idx - numrows + 1;

@@ -443,33 +531,46 @@ const info = struct {
box.move(row.*, 3);
ui.style(.bold);
if (e.ext()) |ext| {
var buf: [32]u8 = undefined;
if (ext.pack.hasmode) {
ui.addstr("Mode: ");
ui.style(.default);
ui.addmode(ext.mode);
var buf: [32]u8 = undefined;
ui.style(.bold);
}
if (ext.pack.hasuid) {
ui.addstr(" UID: ");
ui.style(.default);
ui.addstr(std.fmt.bufPrintZ(&buf, "{d:<6}", .{ ext.uid }) catch unreachable);
ui.style(.bold);
}
if (ext.pack.hasgid) {
ui.addstr(" GID: ");
ui.style(.default);
ui.addstr(std.fmt.bufPrintZ(&buf, "{d:<6}", .{ ext.gid }) catch unreachable);
}
} else {
ui.addstr("Type: ");
ui.style(.default);
ui.addstr(if (e.isDirectory()) "Directory" else if (if (e.file()) |f| f.pack.notreg else false) "Other" else "File");
ui.addstr(switch (e.pack.etype) {
.dir => "Directory",
.nonreg => "Other",
.reg, .link => "File",
else => "Excluded",
});
}
row.* += 1;

// Last modified
if (e.ext()) |ext| {
if (ext.pack.hasmtime) {
box.move(row.*, 3);
ui.style(.bold);
ui.addstr("Last modified: ");
ui.addts(.default, ext.mtime);
row.* += 1;
}
}

// Disk usage & Apparent size
drawSize(box, row, " Disk usage: ", util.blocksToSize(e.pack.blocks), if (e.dir()) |d| util.blocksToSize(d.shared_blocks) else 0);

@@ -489,7 +590,7 @@ const info = struct {
box.move(row.*, 3);
ui.style(.bold);
ui.addstr(" Link count: ");
ui.addnum(.default, model.inodes.map.get(l).?.nlink);
ui.addnum(.default, l.pack.nlink);
box.move(row.*, 23);
ui.style(.bold);
ui.addstr(" Inode: ");

@@ -507,7 +608,7 @@ const info = struct {
// for each item. Think it's better to have a dynamic height based on
// terminal size and scroll if the content doesn't fit.
const rows = 5 // border + padding + close message
+ if (tab == .links) 8 else
+ if (tab == .links and !main.config.binreader) 8 else
4 // name + type + disk usage + apparent size
+ (if (e.ext() != null) @as(u32, 1) else 0) // last modified
+ (if (e.link() != null) @as(u32, 1) else 0) // link count

@@ -543,18 +644,18 @@ const info = struct {
fn keyInput(ch: i32) bool {
if (entry.?.pack.etype == .link) {
switch (ch) {
'1', 'h', ui.c.KEY_LEFT => { set(entry, .info); return true; },
'2', 'l', ui.c.KEY_RIGHT => { set(entry, .links); return true; },
'1', 'h', c.KEY_LEFT => { set(entry, .info); return true; },
'2', 'l', c.KEY_RIGHT => { set(entry, .links); return true; },
else => {},
}
}
if (tab == .links) {
if (tab == .links and !main.config.binreader) {
if (keyInputSelection(ch, &links_idx, links.?.items.len, 5))
return true;
if (ch == 10) { // Enter - go to selected entry
const l = links.?.items[links_idx];
dir_parent = l.parent;
loadDir(&l.entry);
loadDir(l.entry.nameHash());
set(null, .info);
}
}

@@ -701,9 +802,9 @@ const help = struct {
'1' => tab = .keys,
'2' => tab = .flags,
'3' => tab = .about,
'h', ui.c.KEY_LEFT => tab = if (tab == .about) .flags else .keys,
'l', ui.c.KEY_RIGHT => tab = if (tab == .keys) .flags else .about,
'j', ' ', ui.c.KEY_DOWN, ui.c.KEY_NPAGE => {
'h', c.KEY_LEFT => tab = if (tab == .about) .flags else .keys,
'l', c.KEY_RIGHT => tab = if (tab == .keys) .flags else .about,
'j', ' ', c.KEY_DOWN, c.KEY_NPAGE => {
const max = switch (tab) {
.keys => keys.len/2 - keylines,
else => @as(u32, 0),

@@ -711,7 +812,7 @@ const help = struct {
if (offset < max)
offset += 1;
},
'k', ui.c.KEY_UP, ui.c.KEY_PPAGE => { if (offset > 0) offset -= 1; },
'k', c.KEY_UP, c.KEY_PPAGE => { if (offset > 0) offset -= 1; },
else => state = .main,
}
}

@@ -727,7 +828,10 @@ pub fn draw() void {
ui.addch('?');
ui.style(.hd);
ui.addstr(" for help");
if (main.config.imported) {
if (main.config.binreader) {
ui.move(0, ui.cols -| 11);
ui.addstr("[from file]");
} else if (main.config.imported) {
ui.move(0, ui.cols -| 10);
ui.addstr("[imported]");
} else if (!main.config.can_delete.?) {

@@ -741,12 +845,7 @@ pub fn draw() void {
ui.move(1,3);
ui.addch(' ');
ui.style(.dir);

var pathbuf = std.ArrayList(u8).init(main.allocator);
dir_parent.fmtPath(true, &pathbuf);
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&pathbuf)), ui.cols -| 5));
pathbuf.deinit();

ui.addstr(ui.shorten(ui.toUtf8(dir_path), ui.cols -| 5));
ui.style(.default);
ui.addch(' ');

@@ -754,7 +853,7 @@ pub fn draw() void {
if (cursor_idx < current_view.top) current_view.top = cursor_idx;
if (cursor_idx >= current_view.top + numrows) current_view.top = cursor_idx - numrows + 1;

var i: u32 = 0;
var i: u32 = if (dir_loading > 0) numrows else 0;
var sel_row: u32 = 0;
while (i < numrows) : (i += 1) {
if (i+current_view.top >= dir_items.items.len) break;

@@ -771,6 +870,10 @@ pub fn draw() void {
ui.move(ui.rows-1, 0);
ui.hline(' ', ui.cols);
ui.move(ui.rows-1, 0);
if (dir_loading > 0) {
ui.addstr(" Loading... ");
ui.addnum(.hd, dir_loading);
} else {
ui.addch(if (main.config.show_blocks) '*' else ' ');
ui.style(if (main.config.show_blocks) .bold_hd else .hd);
ui.addstr("Total disk usage: ");

@@ -782,6 +885,7 @@ pub fn draw() void {
ui.addsize(.hd, dir_parent.entry.size);
ui.addstr(" Items: ");
ui.addnum(.hd, dir_parent.items);
}

switch (state) {
.main => {},

@@ -790,10 +894,14 @@ pub fn draw() void {
.info => info.draw(),
}
if (message) |m| {
const box = ui.Box.create(6, 60, "Message");
box.move(2, 2);
ui.addstr(m);
box.move(4, 33);
const box = ui.Box.create(@intCast(m.len + 5), 60, "Message");
i = 2;
for (m) |ln| {
box.move(i, 2);
ui.addstr(ln);
i += 1;
}
box.move(i+1, 33);
ui.addstr("Press any key to continue");
}
if (sel_row > 0) ui.move(sel_row, 0);

@@ -804,27 +912,29 @@ fn sortToggle(col: main.config.SortCol, default_order: main.config.SortOrder) vo
else if (main.config.sort_order == .asc) main.config.sort_order = .desc
else main.config.sort_order = .asc;
main.config.sort_col = col;
sortDir(null);
sortDir(0);
}

fn keyInputSelection(ch: i32, idx: *usize, len: usize, page: u32) bool {
switch (ch) {
'j', ui.c.KEY_DOWN => {
'j', c.KEY_DOWN => {
if (idx.*+1 < len) idx.* += 1;
},
'k', ui.c.KEY_UP => {
'k', c.KEY_UP => {
if (idx.* > 0) idx.* -= 1;
},
ui.c.KEY_HOME => idx.* = 0,
ui.c.KEY_END, ui.c.KEY_LL => idx.* = len -| 1,
ui.c.KEY_PPAGE => idx.* = idx.* -| page,
ui.c.KEY_NPAGE => idx.* = @min(len -| 1, idx.* + page),
c.KEY_HOME => idx.* = 0,
c.KEY_END, c.KEY_LL => idx.* = len -| 1,
c.KEY_PPAGE => idx.* = idx.* -| page,
c.KEY_NPAGE => idx.* = @min(len -| 1, idx.* + page),
else => return false,
}
return true;
}

pub fn keyInput(ch: i32) void {
if (dir_loading > 0) return;

defer current_view.save();

if (message != null) {

@@ -844,23 +954,32 @@ pub fn keyInput(ch: i32) void {
'?' => state = .help,
'i' => if (dir_items.items.len > 0) info.set(dir_items.items[cursor_idx], .info),
'r' => {
if (!main.config.can_refresh.?)
message = "Directory refresh feature disabled."
if (main.config.binreader)
message = &.{"Refresh feature is not available when reading from file."}
else if (!main.config.can_refresh.? and main.config.imported)
message = &.{"Refresh feature disabled.", "Re-run with --enable-refresh to enable this option."}
else if (!main.config.can_refresh.?)
message = &.{"Directory refresh feature disabled."}
else {
main.state = .refresh;
scan.setupRefresh(dir_parent);
sink.global.sink = .mem;
mem_sink.global.root = dir_parent;
}
},
'b' => {
if (!main.config.can_shell.?)
message = "Shell feature disabled."
message = &.{"Shell feature disabled.", "Re-run with --enable-shell to enable this option."}
else
main.state = .shell;
},
'd' => {
if (dir_items.items.len == 0) {
} else if (!main.config.can_delete.?)
message = "Deletion feature disabled."
} else if (main.config.binreader)
message = &.{"File deletion is not available when reading from file."}
else if (!main.config.can_delete.? and main.config.imported)
message = &.{"File deletion is disabled.", "Re-run with --enable-delete to enable this option."}
else if (!main.config.can_delete.?)
message = &.{"File deletion is disabled."}
else if (dir_items.items[cursor_idx]) |e| {
main.state = .delete;
const next =

@@ -878,45 +997,46 @@ pub fn keyInput(ch: i32) void {
'M' => if (main.config.extended) sortToggle(.mtime, .desc),
'e' => {
main.config.show_hidden = !main.config.show_hidden;
loadDir(null);
loadDir(0);
state = .main;
},
't' => {
main.config.sort_dirsfirst = !main.config.sort_dirsfirst;
sortDir(null);
sortDir(0);
},
'a' => {
main.config.show_blocks = !main.config.show_blocks;
if (main.config.show_blocks and main.config.sort_col == .size) {
main.config.sort_col = .blocks;
sortDir(null);
sortDir(0);
}
if (!main.config.show_blocks and main.config.sort_col == .blocks) {
main.config.sort_col = .size;
sortDir(null);
sortDir(0);
}
},

// Navigation
10, 'l', ui.c.KEY_RIGHT => {
10, 'l', c.KEY_RIGHT => {
if (dir_items.items.len == 0) {
} else if (dir_items.items[cursor_idx]) |e| {
if (e.dir()) |d| {
dir_parent = d;
loadDir(null);
enterSub(d);
//dir_parent = d;
loadDir(0);
state = .main;
}
} else if (dir_parent.parent) |p| {
dir_parent = p;
loadDir(null);
} else if (dir_parents.items.len > 1) {
enterParent();
loadDir(0);
state = .main;
}
},
'h', '<', ui.c.KEY_BACKSPACE, ui.c.KEY_LEFT => {
if (dir_parent.parent) |p| {
const e = dir_parent;
dir_parent = p;
loadDir(&e.entry);
'h', '<', c.KEY_BACKSPACE, c.KEY_LEFT => {
if (dir_parents.items.len > 1) {
//const h = dir_parent.entry.nameHash();
enterParent();
loadDir(0);
state = .main;
}
},
src/c.zig (new file, 20 lines)

@@ -0,0 +1,20 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

pub const c = @cImport({
@cDefine("_XOPEN_SOURCE", "1"); // for wcwidth()
@cInclude("stdio.h"); // fopen(), used to initialize ncurses
@cInclude("string.h"); // strerror()
@cInclude("time.h"); // strftime()
@cInclude("wchar.h"); // wcwidth()
@cInclude("locale.h"); // setlocale() and localeconv()
@cInclude("fnmatch.h"); // fnmatch()
@cInclude("unistd.h"); // getuid()
@cInclude("sys/types.h"); // struct passwd
@cInclude("pwd.h"); // getpwnam(), getpwuid()
if (@import("builtin").os.tag == .linux) {
@cInclude("sys/vfs.h"); // statfs()
}
@cInclude("curses.h");
@cInclude("zstd.h");
});
src/delete.zig (134 lines changed)

@@ -6,7 +6,11 @@ const main = @import("main.zig");
const model = @import("model.zig");
const ui = @import("ui.zig");
const browser = @import("browser.zig");
const scan = @import("scan.zig");
const sink = @import("sink.zig");
const mem_sink = @import("mem_sink.zig");
const util = @import("util.zig");
const c = @import("c.zig").c;

var parent: *model.Dir = undefined;
var entry: *model.Entry = undefined;

@@ -46,7 +50,7 @@ fn deleteItem(dir: std.fs.Dir, path: [:0]const u8, ptr: *align(1) ?*model.Entry)

if (entry.dir()) |d| {
var fd = dir.openDirZ(path, .{ .no_follow = true, .iterate = false }) catch |e| return err(e);
var it = &d.sub;
var it = &d.sub.ptr;
parent = d;
defer parent = parent.parent.?;
while (it.*) |n| {

@@ -55,15 +59,66 @@ fn deleteItem(dir: std.fs.Dir, path: [:0]const u8, ptr: *align(1) ?*model.Entry)
return true;
}
if (it.* == n) // item deletion failed, make sure to still advance to next
it = &n.next;
it = &n.next.ptr;
}
fd.close();
dir.deleteDirZ(path) catch |e|
return if (e != error.DirNotEmpty or d.sub == null) err(e) else false;
return if (e != error.DirNotEmpty or d.sub.ptr == null) err(e) else false;
} else
dir.deleteFileZ(path) catch |e| return err(e);
ptr.*.?.delStats(parent);
ptr.* = ptr.*.?.next;
ptr.*.?.zeroStats(parent);
ptr.* = ptr.*.?.next.ptr;
return false;
}

// Returns true if the item has been deleted successfully.
fn deleteCmd(path: [:0]const u8, ptr: *align(1) ?*model.Entry) bool {
{
var env = std.process.getEnvMap(main.allocator) catch unreachable;
defer env.deinit();
env.put("NCDU_DELETE_PATH", path) catch unreachable;

// Since we're passing the path as an environment variable and go through
// the shell anyway, we can refer to the variable and avoid error-prone
// shell escaping.
const cmd = std.fmt.allocPrint(main.allocator, "{s} \"$NCDU_DELETE_PATH\"", .{main.config.delete_command}) catch unreachable;
defer main.allocator.free(cmd);
ui.runCmd(&.{"/bin/sh", "-c", cmd}, null, &env, true);
}

const stat = scan.statAt(std.fs.cwd(), path, false, null) catch {
// Stat failed. Would be nice to display an error if it's not
// 'FileNotFound', but w/e, let's just assume the item has been
// deleted as expected.
ptr.*.?.zeroStats(parent);
ptr.* = ptr.*.?.next.ptr;
return true;
};

// If either old or new entry is not a dir, remove & re-add entry in the in-memory tree.
if (ptr.*.?.pack.etype != .dir or stat.etype != .dir) {
ptr.*.?.zeroStats(parent);
const e = model.Entry.create(main.allocator, stat.etype, main.config.extended and !stat.ext.isEmpty(), ptr.*.?.name());
e.next.ptr = ptr.*.?.next.ptr;
mem_sink.statToEntry(&stat, e, parent);
ptr.* = e;

var it : ?*model.Dir = parent;
while (it) |p| : (it = p.parent) {
if (stat.etype != .link) {
p.entry.pack.blocks +|= e.pack.blocks;
p.entry.size +|= e.size;
}
p.items +|= 1;
}
}

// If new entry is a dir, recursively scan.
if (ptr.*.?.dir()) |d| {
main.state = .refresh;
sink.global.sink = .mem;
mem_sink.global.root = d;
}
return false;
}
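// Illustrative sketch of the quoting trick described above (not part of the
// diff): because the path travels through the NCDU_DELETE_PATH environment
// variable, the command string itself is fixed and names containing spaces,
// quotes or '$' need no shell escaping. "rm -rf --" is only an example value
// for --delete-command.
test "command string is independent of the path" {
    var buf: [64]u8 = undefined;
    const cmd = try std.fmt.bufPrint(&buf, "{s} \"$NCDU_DELETE_PATH\"", .{"rm -rf --"});
    try std.testing.expectEqualStrings("rm -rf -- \"$NCDU_DELETE_PATH\"", cmd);
}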
@@ -76,27 +131,34 @@ pub fn delete() ?*model.Entry {

// Find the pointer to this entry
const e = entry;
var it = &parent.sub;
while (it.*) |n| : (it = &n.next)
var it = &parent.sub.ptr;
while (it.*) |n| : (it = &n.next.ptr)
if (it.* == entry)
break;

var path = std.ArrayList(u8).init(main.allocator);
defer path.deinit();
parent.fmtPath(true, &path);
var path: std.ArrayListUnmanaged(u8) = .empty;
defer path.deinit(main.allocator);
parent.fmtPath(main.allocator, true, &path);
if (path.items.len == 0 or path.items[path.items.len-1] != '/')
path.append('/') catch unreachable;
path.appendSlice(entry.name()) catch unreachable;
path.append(main.allocator, '/') catch unreachable;
path.appendSlice(main.allocator, entry.name()) catch unreachable;

_ = deleteItem(std.fs.cwd(), util.arrayListBufZ(&path), it);
if (main.config.delete_command.len == 0) {
_ = deleteItem(std.fs.cwd(), util.arrayListBufZ(&path, main.allocator), it);
model.inodes.addAllStats();
return if (it.* == e) e else next_sel;
} else {
const isdel = deleteCmd(util.arrayListBufZ(&path, main.allocator), it);
model.inodes.addAllStats();
return if (isdel) next_sel else it.*;
}
}

fn drawConfirm() void {
browser.draw();
const box = ui.Box.create(6, 60, "Confirm delete");
box.move(1, 2);
if (main.config.delete_command.len == 0) {
ui.addstr("Are you sure you want to delete \"");
ui.addstr(ui.shorten(ui.toUtf8(entry.name()), 21));
ui.addch('"');

@@ -106,6 +168,15 @@ fn drawConfirm() void {
box.move(2, 18);
ui.addstr("and all of its contents?");
}
} else {
ui.addstr("Are you sure you want to run \"");
ui.addstr(ui.shorten(ui.toUtf8(main.config.delete_command), 25));
ui.addch('"');
box.move(2, 4);
ui.addstr("on \"");
ui.addstr(ui.shorten(ui.toUtf8(entry.name()), 50));
ui.addch('"');
}

box.move(4, 15);
ui.style(if (confirm == .yes) .sel else .default);

@@ -118,20 +189,25 @@ fn drawConfirm() void {
box.move(4, 31);
ui.style(if (confirm == .ignore) .sel else .default);
ui.addstr("don't ask me again");
box.move(4, switch (confirm) {
.yes => 15,
.no => 25,
.ignore => 31
});
}

fn drawProgress() void {
var path = std.ArrayList(u8).init(main.allocator);
defer path.deinit();
parent.fmtPath(false, &path);
path.append('/') catch unreachable;
path.appendSlice(entry.name()) catch unreachable;
var path: std.ArrayListUnmanaged(u8) = .empty;
defer path.deinit(main.allocator);
parent.fmtPath(main.allocator, false, &path);
path.append(main.allocator, '/') catch unreachable;
path.appendSlice(main.allocator, entry.name()) catch unreachable;

// TODO: Item counts and progress bar would be nice.

const box = ui.Box.create(6, 60, "Deleting...");
box.move(2, 2);
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path)), 56));
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path, main.allocator)), 56));
box.move(4, 41);
ui.addstr("Press ");
ui.style(.key);

@@ -141,16 +217,16 @@ fn drawProgress() void {
}

fn drawErr() void {
var path = std.ArrayList(u8).init(main.allocator);
defer path.deinit();
parent.fmtPath(false, &path);
path.append('/') catch unreachable;
path.appendSlice(entry.name()) catch unreachable;
var path: std.ArrayListUnmanaged(u8) = .empty;
defer path.deinit(main.allocator);
parent.fmtPath(main.allocator, false, &path);
path.append(main.allocator, '/') catch unreachable;
path.appendSlice(main.allocator, entry.name()) catch unreachable;

const box = ui.Box.create(6, 60, "Error");
box.move(1, 2);
ui.addstr("Error deleting ");
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path)), 41));
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path, main.allocator)), 41));
box.move(2, 4);
ui.addstr(ui.errorString(error_code));

@@ -178,11 +254,11 @@ pub fn draw() void {
pub fn keyInput(ch: i32) void {
switch (state) {
.confirm => switch (ch) {
'h', ui.c.KEY_LEFT => confirm = switch (confirm) {
'h', c.KEY_LEFT => confirm = switch (confirm) {
.ignore => .no,
else => .yes,
},
'l', ui.c.KEY_RIGHT => confirm = switch (confirm) {
'l', c.KEY_RIGHT => confirm = switch (confirm) {
.yes => .no,
else => .ignore,
},

@@ -202,11 +278,11 @@ pub fn keyInput(ch: i32) void {
main.state = .browse;
},
.err => switch (ch) {
'h', ui.c.KEY_LEFT => error_option = switch (error_option) {
'h', c.KEY_LEFT => error_option = switch (error_option) {
.all => .ignore,
else => .abort,
},
'l', ui.c.KEY_RIGHT => error_option = switch (error_option) {
'l', c.KEY_RIGHT => error_option = switch (error_option) {
.abort => .ignore,
else => .all,
},

(file header not captured)

@@ -3,7 +3,7 @@

const std = @import("std");
const main = @import("main.zig");
const c = @cImport(@cInclude("fnmatch.h"));
const c = @import("c.zig").c;

// Reference:
// https://manned.org/glob.7

@@ -123,7 +123,7 @@ test "parse" {
fn PatternList(comptime withsub: bool) type {
return struct {
literals: std.HashMapUnmanaged(*const Pattern, Val, Ctx, 80) = .{},
wild: std.ArrayListUnmanaged(*const Pattern) = .{},
wild: std.ArrayListUnmanaged(*const Pattern) = .empty,

// Not a fan of the map-of-arrays approach in the 'withsub' case, it
// has a lot of extra allocations. Linking the Patterns together in a
src/json_export.zig (new file, 270 lines)

@@ -0,0 +1,270 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const sink = @import("sink.zig");
const util = @import("util.zig");
const ui = @import("ui.zig");
const c = @import("c.zig").c;

// JSON output is necessarily single-threaded and items MUST be added depth-first.

pub const global = struct {
var writer: *Writer = undefined;
};


const ZstdWriter = struct {
ctx: ?*c.ZSTD_CStream,
out: c.ZSTD_outBuffer,
outbuf: [c.ZSTD_BLOCKSIZE_MAX + 64]u8,

fn create() *ZstdWriter {
const w = main.allocator.create(ZstdWriter) catch unreachable;
w.out = .{
.dst = &w.outbuf,
.size = w.outbuf.len,
.pos = 0,
};
while (true) {
w.ctx = c.ZSTD_createCStream();
if (w.ctx != null) break;
ui.oom();
}
_ = c.ZSTD_CCtx_setParameter(w.ctx, c.ZSTD_c_compressionLevel, main.config.complevel);
return w;
}

fn destroy(w: *ZstdWriter) void {
_ = c.ZSTD_freeCStream(w.ctx);
main.allocator.destroy(w);
}

fn write(w: *ZstdWriter, f: std.fs.File, in: []const u8, flush: bool) !void {
var arg = c.ZSTD_inBuffer{
.src = in.ptr,
.size = in.len,
.pos = 0,
};
while (true) {
const v = c.ZSTD_compressStream2(w.ctx, &w.out, &arg, if (flush) c.ZSTD_e_end else c.ZSTD_e_continue);
if (c.ZSTD_isError(v) != 0) return error.ZstdCompressError;
if (flush or w.out.pos > w.outbuf.len / 2) {
try f.writeAll(w.outbuf[0..w.out.pos]);
w.out.pos = 0;
}
if (!flush and arg.pos == arg.size) break;
if (flush and v == 0) break;
}
}
};

pub const Writer = struct {
fd: std.fs.File,
zstd: ?*ZstdWriter = null,
// Must be large enough to hold PATH_MAX*6 plus some overhead.
// (The 6 is because, in the worst case, every byte expands to a "\u####"
// escape, and we do pessimistic estimates here in order to avoid checking
// buffer lengths for each and every write operation)
buf: [64*1024]u8 = undefined,
off: usize = 0,
dir_entry_open: bool = false,

fn flush(ctx: *Writer, bytes: usize) void {
@branchHint(.unlikely);
// This can only really happen when the root path exceeds PATH_MAX,
// in which case we would probably have error'ed out earlier anyway.
if (bytes > ctx.buf.len) ui.die("Error writing JSON export: path too long.\n", .{});
const buf = ctx.buf[0..ctx.off];
(if (ctx.zstd) |z| z.write(ctx.fd, buf, bytes == 0) else ctx.fd.writeAll(buf)) catch |e|
ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
ctx.off = 0;
}

fn ensureSpace(ctx: *Writer, bytes: usize) void {
if (bytes > ctx.buf.len - ctx.off) ctx.flush(bytes);
}

fn write(ctx: *Writer, s: []const u8) void {
@memcpy(ctx.buf[ctx.off..][0..s.len], s);
ctx.off += s.len;
}

fn writeByte(ctx: *Writer, b: u8) void {
ctx.buf[ctx.off] = b;
ctx.off += 1;
}

// Write escaped string contents, excluding the quotes.
fn writeStr(ctx: *Writer, s: []const u8) void {
for (s) |b| {
if (b >= 0x20 and b != '"' and b != '\\' and b != 127) ctx.writeByte(b)
else switch (b) {
'\n' => ctx.write("\\n"),
'\r' => ctx.write("\\r"),
0x8 => ctx.write("\\b"),
'\t' => ctx.write("\\t"),
0xC => ctx.write("\\f"),
'\\' => ctx.write("\\\\"),
'"' => ctx.write("\\\""),
else => {
ctx.write("\\u00");
const hexdig = "0123456789abcdef";
ctx.writeByte(hexdig[b>>4]);
ctx.writeByte(hexdig[b&0xf]);
},
}
}
}

fn writeUint(ctx: *Writer, n: u64) void {
// Based on std.fmt.formatInt
var a = n;
var buf: [24]u8 = undefined;
var index: usize = buf.len;
while (a >= 100) : (a = @divTrunc(a, 100)) {
index -= 2;
buf[index..][0..2].* = std.fmt.digits2(@as(u8, @intCast(a % 100)));
}
if (a < 10) {
index -= 1;
buf[index] = '0' + @as(u8, @intCast(a));
} else {
index -= 2;
buf[index..][0..2].* = std.fmt.digits2(@as(u8, @intCast(a)));
}
ctx.write(buf[index..]);
}

fn init(out: std.fs.File) *Writer {
var ctx = main.allocator.create(Writer) catch unreachable;
ctx.* = .{ .fd = out };
if (main.config.compress) ctx.zstd = ZstdWriter.create();
ctx.write("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":");
ctx.writeUint(@intCast(@max(0, std.time.timestamp())));
ctx.writeByte('}');
return ctx;
}

// A newly written directory entry is left "open", i.e. the '}' to close
// the item object is not written, to allow for a setReadError() to be
// caught if one happens before the first sub entry.
// Any read errors after the first sub entry are thrown away, but that's
// just a limitation of the JSON format.
fn closeDirEntry(ctx: *Writer, rderr: bool) void {
if (ctx.dir_entry_open) {
ctx.dir_entry_open = false;
if (rderr) ctx.write(",\"read_error\":true");
ctx.writeByte('}');
}
}

fn writeSpecial(ctx: *Writer, name: []const u8, t: model.EType) void {
ctx.closeDirEntry(false);
ctx.ensureSpace(name.len*6 + 1000);
ctx.write(if (t.isDirectory()) ",\n[{\"name\":\"" else ",\n{\"name\":\"");
ctx.writeStr(name);
ctx.write(switch (t) {
.err => "\",\"read_error\":true}",
.otherfs => "\",\"excluded\":\"otherfs\"}",
.kernfs => "\",\"excluded\":\"kernfs\"}",
.pattern => "\",\"excluded\":\"pattern\"}",
else => unreachable,
});
if (t.isDirectory()) ctx.writeByte(']');
}

fn writeStat(ctx: *Writer, name: []const u8, stat: *const sink.Stat, parent_dev: u64) void {
ctx.ensureSpace(name.len*6 + 1000);
ctx.write(if (stat.etype == .dir) ",\n[{\"name\":\"" else ",\n{\"name\":\"");
ctx.writeStr(name);
ctx.writeByte('"');
if (stat.size > 0) {
ctx.write(",\"asize\":");
ctx.writeUint(stat.size);
}
if (stat.blocks > 0) {
ctx.write(",\"dsize\":");
ctx.writeUint(util.blocksToSize(stat.blocks));
}
if (stat.etype == .dir and stat.dev != parent_dev) {
ctx.write(",\"dev\":");
ctx.writeUint(stat.dev);
}
if (stat.etype == .link) {
ctx.write(",\"ino\":");
ctx.writeUint(stat.ino);
ctx.write(",\"hlnkc\":true,\"nlink\":");
ctx.writeUint(stat.nlink);
}
if (stat.etype == .nonreg) ctx.write(",\"notreg\":true");
if (main.config.extended) {
if (stat.ext.pack.hasuid) {
ctx.write(",\"uid\":");
ctx.writeUint(stat.ext.uid);
}
if (stat.ext.pack.hasgid) {
ctx.write(",\"gid\":");
ctx.writeUint(stat.ext.gid);
}
if (stat.ext.pack.hasmode) {
ctx.write(",\"mode\":");
ctx.writeUint(stat.ext.mode);
}
if (stat.ext.pack.hasmtime) {
ctx.write(",\"mtime\":");
ctx.writeUint(stat.ext.mtime);
}
}
}
};

pub const Dir = struct {
dev: u64,

pub fn addSpecial(_: *Dir, name: []const u8, sp: model.EType) void {
global.writer.writeSpecial(name, sp);
}

pub fn addStat(_: *Dir, name: []const u8, stat: *const sink.Stat) void {
global.writer.closeDirEntry(false);
global.writer.writeStat(name, stat, undefined);
global.writer.writeByte('}');
}

pub fn addDir(d: *Dir, name: []const u8, stat: *const sink.Stat) Dir {
global.writer.closeDirEntry(false);
global.writer.writeStat(name, stat, d.dev);
global.writer.dir_entry_open = true;
return .{ .dev = stat.dev };
}

pub fn setReadError(_: *Dir) void {
global.writer.closeDirEntry(true);
}

pub fn final(_: *Dir) void {
global.writer.ensureSpace(1000);
global.writer.closeDirEntry(false);
global.writer.writeByte(']');
}
};

pub fn createRoot(path: []const u8, stat: *const sink.Stat) Dir {
var root = Dir{.dev=0};
return root.addDir(path, stat);
}

pub fn done() void {
global.writer.write("]\n");
global.writer.flush(0);
if (global.writer.zstd) |z| z.destroy();
global.writer.fd.close();
main.allocator.destroy(global.writer);
}

pub fn setupOutput(out: std.fs.File) void {
global.writer = Writer.init(out);
}
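A small standalone sketch of the worst case behind the PATH_MAX*6 buffer estimate in Writer above: every input byte may expand to a six-byte "\u00XX" escape, so the escaped form of a name is pessimistically sized at six times its length.

const std = @import("std");

test "escape expansion factor" {
    const raw = [_]u8{ 0x01, 0x02, 0x03 };
    var out: [raw.len * 6]u8 = undefined;
    var n: usize = 0;
    const hexdig = "0123456789abcdef";
    for (raw) |b| { // every byte becomes exactly six output bytes
        @memcpy(out[n..][0..4], "\\u00");
        out[n + 4] = hexdig[b >> 4];
        out[n + 5] = hexdig[b & 0xf];
        n += 6;
    }
    try std.testing.expectEqualStrings("\\u0001\\u0002\\u0003", out[0..n]);
}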
src/json_import.zig (new file, 562 lines)

@@ -0,0 +1,562 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const util = @import("util.zig");
const model = @import("model.zig");
const sink = @import("sink.zig");
const ui = @import("ui.zig");
const c = @import("c.zig").c;


const ZstdReader = struct {
ctx: ?*c.ZSTD_DStream,
in: c.ZSTD_inBuffer,
lastret: usize = 0,
inbuf: [c.ZSTD_BLOCKSIZE_MAX + 16]u8, // This ZSTD_DStreamInSize() + a little bit extra

fn create(head: []const u8) *ZstdReader {
const r = main.allocator.create(ZstdReader) catch unreachable;
@memcpy(r.inbuf[0..head.len], head);
r.in = .{
.src = &r.inbuf,
.size = head.len,
.pos = 0,
};
while (true) {
r.ctx = c.ZSTD_createDStream();
if (r.ctx != null) break;
ui.oom();
}
return r;
}

fn destroy(r: *ZstdReader) void {
_ = c.ZSTD_freeDStream(r.ctx);
main.allocator.destroy(r);
}

fn read(r: *ZstdReader, f: std.fs.File, out: []u8) !usize {
while (true) {
if (r.in.size == r.in.pos) {
r.in.pos = 0;
r.in.size = try f.read(&r.inbuf);
if (r.in.size == 0) {
if (r.lastret == 0) return 0;
return error.ZstdDecompressError; // Early EOF
}
}

var arg = c.ZSTD_outBuffer{ .dst = out.ptr, .size = out.len, .pos = 0 };
r.lastret = c.ZSTD_decompressStream(r.ctx, &arg, &r.in);
if (c.ZSTD_isError(r.lastret) != 0) return error.ZstdDecompressError;
if (arg.pos > 0) return arg.pos;
}
}
};


// Using a custom JSON parser here because, while std.json is great, it does
// perform strict UTF-8 validation. Which is correct, of course, but ncdu dumps
// are not always correct JSON as they may contain non-UTF-8 paths encoded as
// strings.
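// Illustration of the problem just described (standalone sketch): a Latin-1
// filename byte such as 0xE9 is not valid UTF-8, yet it can appear inside a
// string in an ncdu dump, so a strictly validating JSON parser would reject
// the whole file.
test "filenames need not be valid UTF-8" {
    const name = [_]u8{ 'c', 'a', 'f', 0xe9 };
    try std.testing.expect(!std.unicode.utf8ValidateSlice(&name));
}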
const Parser = struct {
rd: std.fs.File,
zstd: ?*ZstdReader = null,
rdoff: usize = 0,
rdsize: usize = 0,
byte: u64 = 1,
line: u64 = 1,
buf: [129*1024]u8 = undefined,

fn die(p: *Parser, str: []const u8) noreturn {
ui.die("Error importing file on line {}:{}: {s}.\n", .{ p.line, p.byte, str });
}

// Feed back a byte that has just been returned by nextByte()
fn undoNextByte(p: *Parser, b: u8) void {
p.byte -= 1;
p.rdoff -= 1;
p.buf[p.rdoff] = b;
}

fn fill(p: *Parser) void {
p.rdoff = 0;
p.rdsize = (if (p.zstd) |z| z.read(p.rd, &p.buf) else p.rd.read(&p.buf)) catch |e| switch (e) {
error.IsDir => p.die("not a file"), // should be detected at open() time, but no flag for that...
error.SystemResources => p.die("out of memory"),
error.ZstdDecompressError => p.die("decompression error"),
else => p.die("I/O error"),
};
}

// Returns 0 on EOF.
// (or if the file contains a 0 byte, but that's invalid anyway)
// (Returning a '?u8' here is nicer but kills performance by about +30%)
fn nextByte(p: *Parser) u8 {
if (p.rdoff == p.rdsize) {
@branchHint(.unlikely);
p.fill();
if (p.rdsize == 0) return 0;
}
p.byte += 1;
defer p.rdoff += 1;
return (&p.buf)[p.rdoff];
}

// next non-whitespace byte
fn nextChr(p: *Parser) u8 {
while (true) switch (p.nextByte()) {
'\n' => {
p.line += 1;
p.byte = 1;
},
' ', '\t', '\r' => {},
else => |b| return b,
};
}

fn expectLit(p: *Parser, lit: []const u8) void {
for (lit) |b| if (b != p.nextByte()) p.die("invalid JSON");
}

fn hexdig(p: *Parser) u16 {
const b = p.nextByte();
return switch (b) {
'0'...'9' => b - '0',
'a'...'f' => b - 'a' + 10,
'A'...'F' => b - 'A' + 10,
else => p.die("invalid hex digit"),
};
}

fn stringContentSlow(p: *Parser, buf: []u8, head: u8, off: usize) []u8 {
@branchHint(.unlikely);
var b = head;
var n = off;
while (true) {
switch (b) {
'"' => break,
'\\' => switch (p.nextByte()) {
'"' => if (n < buf.len) { buf[n] = '"'; n += 1; },
'\\'=> if (n < buf.len) { buf[n] = '\\';n += 1; },
'/' => if (n < buf.len) { buf[n] = '/'; n += 1; },
'b' => if (n < buf.len) { buf[n] = 0x8; n += 1; },
'f' => if (n < buf.len) { buf[n] = 0xc; n += 1; },
'n' => if (n < buf.len) { buf[n] = 0xa; n += 1; },
'r' => if (n < buf.len) { buf[n] = 0xd; n += 1; },
't' => if (n < buf.len) { buf[n] = 0x9; n += 1; },
'u' => {
const first = (p.hexdig()<<12) + (p.hexdig()<<8) + (p.hexdig()<<4) + p.hexdig();
var unit = @as(u21, first);
if (std.unicode.utf16IsLowSurrogate(first)) p.die("Unexpected low surrogate");
if (std.unicode.utf16IsHighSurrogate(first)) {
p.expectLit("\\u");
const second = (p.hexdig()<<12) + (p.hexdig()<<8) + (p.hexdig()<<4) + p.hexdig();
unit = std.unicode.utf16DecodeSurrogatePair(&.{first, second}) catch p.die("Invalid low surrogate");
}
if (n + 6 < buf.len)
n += std.unicode.utf8Encode(unit, buf[n..n+5]) catch unreachable;
},
else => p.die("invalid escape sequence"),
},
0x20, 0x21, 0x23...0x5b, 0x5d...0xff => if (n < buf.len) { buf[n] = b; n += 1; },
else => p.die("invalid character in string"),
}
b = p.nextByte();
}
return buf[0..n];
}
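// The surrogate-pair branch above, exercised in isolation (standalone
// sketch): "\ud83d\ude00" decodes to the single code point U+1F600, which
// re-encodes as four bytes of UTF-8, hence the `n + 6 < buf.len` head room
// check before utf8Encode().
test "surrogate pair decoding" {
    const cp = try std.unicode.utf16DecodeSurrogatePair(&.{ 0xd83d, 0xde00 });
    try std.testing.expectEqual(@as(u21, 0x1f600), cp);
    var buf: [4]u8 = undefined;
    try std.testing.expectEqual(@as(u3, 4), try std.unicode.utf8Encode(cp, &buf));
}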
|
||||
// Read a string (after the ") into buf.
|
||||
// Any characters beyond the size of the buffer are consumed but otherwise discarded.
|
||||
fn stringContent(p: *Parser, buf: []u8) []u8 {
|
||||
// The common case (for ncdu dumps): string fits in the given buffer and does not contain any escapes.
|
||||
var n: usize = 0;
|
||||
var b = p.nextByte();
|
||||
while (n < buf.len and b >= 0x20 and b != '"' and b != '\\') {
|
||||
buf[n] = b;
|
||||
n += 1;
|
||||
b = p.nextByte();
|
||||
}
|
||||
if (b == '"') return buf[0..n];
|
||||
return p.stringContentSlow(buf, b, n);
|
||||
}
|
||||
|
||||
fn string(p: *Parser, buf: []u8) []u8 {
|
||||
if (p.nextChr() != '"') p.die("expected string");
|
||||
return p.stringContent(buf);
|
||||
}
|
||||
|
||||
fn uintTail(p: *Parser, head: u8, T: anytype) T {
|
||||
if (head == '0') return 0;
|
||||
var v: T = head - '0'; // Assumption: T >= u8
|
||||
// Assumption: we don't parse JSON "documents" that are a bare uint.
|
||||
while (true) switch (p.nextByte()) {
|
||||
'0'...'9' => |b| {
|
||||
const newv = v *% 10 +% (b - '0');
|
||||
if (newv < v) p.die("integer out of range");
|
||||
v = newv;
|
||||
},
|
||||
else => |b| break p.undoNextByte(b),
|
||||
};
|
||||
if (v == 0) p.die("expected number");
|
||||
return v;
|
||||
}
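
    // Editor's note: the wrapping arithmetic above (`*%`, `+%` plus the
    // `newv < v` test) detects overflow cheaply, but a wrapped product can in
    // principle still land above the previous value and slip through. A
    // stricter variant using the overflow-reporting builtins, if exactness
    // ever matters more than speed (a sketch, not in the source):
    fn uintTailChecked(p: *Parser, head: u8, T: anytype) T {
        if (head == '0') return 0;
        var v: T = head - '0'; // Same assumption as above: T >= u8.
        while (true) switch (p.nextByte()) {
            '0'...'9' => |b| {
                // Each builtin returns .{ result, overflow bit }.
                const mul = @mulWithOverflow(v, 10);
                const add = @addWithOverflow(mul[0], b - '0');
                if (mul[1] != 0 or add[1] != 0) p.die("integer out of range");
                v = add[0];
            },
            else => |b| break p.undoNextByte(b),
        };
        return v;
    }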

    fn uint(p: *Parser, T: anytype) T {
        switch (p.nextChr()) {
            '0'...'9' => |b| return p.uintTail(b, T),
            else => p.die("expected number"),
        }
    }

    fn boolean(p: *Parser) bool {
        switch (p.nextChr()) {
            't' => { p.expectLit("rue"); return true; },
            'f' => { p.expectLit("alse"); return false; },
            else => p.die("expected boolean"),
        }
    }

    fn obj(p: *Parser) void {
        if (p.nextChr() != '{') p.die("expected object");
    }

    fn key(p: *Parser, first: bool, buf: []u8) ?[]u8 {
        const k = switch (p.nextChr()) {
            ',' => blk: {
                if (first) p.die("invalid JSON");
                break :blk p.string(buf);
            },
            '"' => blk: {
                if (!first) p.die("invalid JSON");
                break :blk p.stringContent(buf);
            },
            '}' => return null,
            else => p.die("invalid JSON"),
        };
        if (p.nextChr() != ':') p.die("invalid JSON");
        return k;
    }

    fn array(p: *Parser) void {
        if (p.nextChr() != '[') p.die("expected array");
    }

    fn elem(p: *Parser, first: bool) bool {
        switch (p.nextChr()) {
            ',' => if (first) p.die("invalid JSON") else return true,
            ']' => return false,
            else => |b| {
                if (!first) p.die("invalid JSON");
                p.undoNextByte(b);
                return true;
            },
        }
    }

    fn skipContent(p: *Parser, head: u8) void {
        switch (head) {
            't' => p.expectLit("rue"),
            'f' => p.expectLit("alse"),
            'n' => p.expectLit("ull"),
            '-', '0'...'9' =>
                // Numbers are kind of annoying, this "parsing" is invalid and ultra-lazy.
                while (true) switch (p.nextByte()) {
                    '-', '+', 'e', 'E', '.', '0'...'9' => {},
                    else => |b| return p.undoNextByte(b),
                },
            '"' => _ = p.stringContent(&[0]u8{}),
            '[' => {
                var first = true;
                while (p.elem(first)) {
                    first = false;
                    p.skip();
                }
            },
            '{' => {
                var first = true;
                while (p.key(first, &[0]u8{})) |_| {
                    first = false;
                    p.skip();
                }
            },
            else => p.die("invalid JSON"),
        }
    }

    fn skip(p: *Parser) void {
        p.skipContent(p.nextChr());
    }

    fn eof(p: *Parser) void {
        if (p.nextChr() != 0) p.die("trailing garbage");
    }
};
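
Taken together this is a pull parser: the caller drives obj()/key()/elem() and pulls typed values out, skipping whatever it doesn't recognize. A minimal usage sketch in the same style as the test below (buffer preloaded so `rd` is never touched; illustrative only, not part of the source):

test "Parser usage sketch" {
    const json = "{\"name\":\"x.txt\",\"asize\":512}";
    var p = Parser{ .rd = undefined, .rdsize = json.len };
    @memcpy(p.buf[0..json.len], json);

    var buf: [64]u8 = undefined;
    p.obj();
    var first = true;
    while (p.key(first, &buf)) |k| {
        first = false;
        if (std.mem.eql(u8, k, "asize")) {
            try std.testing.expectEqual(@as(u64, 512), p.uint(u64));
        } else {
            p.skip(); // unrecognized keys are skipped, exactly as itemkey() does below
        }
    }
}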


// Should really add some invalid JSON test cases as well, but I'd first like
// to benchmark the performance impact of using error returns instead of
// calling ui.die().
test "JSON parser" {
    const json =
        \\{
        \\  "null": null,
        \\  "true": true,
        \\  "false": false,
        \\  "zero":0 ,"uint": 123,
        \\  "emptyObj": {},
        \\  "emptyArray": [],
        \\  "emptyString": "",
        \\  "encString": "\"\\\/\b\f\n\uBe3F",
        \\  "numbers": [0,1,20,-300, 3.4 ,0e-10 , -100.023e+13 ]
        \\}
    ;
    var p = Parser{ .rd = undefined, .rdsize = json.len };
    @memcpy(p.buf[0..json.len], json);
    p.skip();

    p = Parser{ .rd = undefined, .rdsize = json.len };
    @memcpy(p.buf[0..json.len], json);
    var buf: [128]u8 = undefined;
    p.obj();

    try std.testing.expectEqualStrings(p.key(true, &buf).?, "null");
    p.skip();

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "true");
    try std.testing.expect(p.boolean());

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "false");
    try std.testing.expect(!p.boolean());

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "zero");
    try std.testing.expectEqual(0, p.uint(u8));

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "uint");
    try std.testing.expectEqual(123, p.uint(u8));

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "emptyObj");
    p.obj();
    try std.testing.expect(p.key(true, &buf) == null);

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "emptyArray");
    p.array();
    try std.testing.expect(!p.elem(true));

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "emptyString");
    try std.testing.expectEqualStrings(p.string(&buf), "");

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "encString");
    try std.testing.expectEqualStrings(p.string(&buf), "\"\\/\x08\x0c\n\u{be3f}");

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "numbers");
    p.skip();

    try std.testing.expect(p.key(true, &buf) == null);
}
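
Until that benchmark happens, coverage of valid-but-awkward inputs can still grow without touching die(); one possible extra case, following the same conventions (a sketch, not in the source):

test "JSON parser: nested skip" {
    // No assertions needed: this only checks that skip() walks a whole
    // nested document, including an escape-only string, without dying.
    const json = "[[],[[0,true],{\"a\":[null]}],\"\\u0041\"]";
    var p = Parser{ .rd = undefined, .rdsize = json.len };
    @memcpy(p.buf[0..json.len], json);
    p.skip();
}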


const Ctx = struct {
    p: *Parser,
    sink: *sink.Thread,
    stat: sink.Stat = .{},
    rderr: bool = false,
    namelen: usize = 0,
    namebuf: [32*1024]u8 = undefined,
};


fn itemkey(ctx: *Ctx, key: []const u8) void {
    const eq = std.mem.eql;
    switch (if (key.len > 0) key[0] else @as(u8,0)) {
        'a' => {
            if (eq(u8, key, "asize")) {
                ctx.stat.size = ctx.p.uint(u64);
                return;
            }
        },
        'd' => {
            if (eq(u8, key, "dsize")) {
                ctx.stat.blocks = @intCast(ctx.p.uint(u64)>>9);
                return;
            }
            if (eq(u8, key, "dev")) {
                ctx.stat.dev = ctx.p.uint(u64);
                return;
            }
        },
        'e' => {
            if (eq(u8, key, "excluded")) {
                var buf: [32]u8 = undefined;
                const typ = ctx.p.string(&buf);
                // "frmlnk" is also possible, but currently considered equivalent to "pattern".
                ctx.stat.etype =
                    if (eq(u8, typ, "otherfs") or eq(u8, typ, "othfs")) .otherfs
                    else if (eq(u8, typ, "kernfs")) .kernfs
                    else .pattern;
                return;
            }
        },
        'g' => {
            if (eq(u8, key, "gid")) {
                ctx.stat.ext.gid = ctx.p.uint(u32);
                ctx.stat.ext.pack.hasgid = true;
                return;
            }
        },
        'h' => {
            if (eq(u8, key, "hlnkc")) {
                if (ctx.p.boolean()) ctx.stat.etype = .link;
                return;
            }
        },
        'i' => {
            if (eq(u8, key, "ino")) {
                ctx.stat.ino = ctx.p.uint(u64);
                return;
            }
        },
        'm' => {
            if (eq(u8, key, "mode")) {
                ctx.stat.ext.mode = ctx.p.uint(u16);
                ctx.stat.ext.pack.hasmode = true;
                return;
            }
            if (eq(u8, key, "mtime")) {
                ctx.stat.ext.mtime = ctx.p.uint(u64);
                ctx.stat.ext.pack.hasmtime = true;
                // Accept decimal numbers, but discard the fractional part because our data model doesn't support it.
                switch (ctx.p.nextByte()) {
                    '.' =>
                        while (true) switch (ctx.p.nextByte()) {
                            '0'...'9' => {},
                            else => |b| return ctx.p.undoNextByte(b),
                        },
                    else => |b| return ctx.p.undoNextByte(b),
                }
            }
        },
        'n' => {
            if (eq(u8, key, "name")) {
                if (ctx.namelen != 0) ctx.p.die("duplicate key");
                ctx.namelen = ctx.p.string(&ctx.namebuf).len;
                if (ctx.namelen > ctx.namebuf.len-5) ctx.p.die("too long file name");
                return;
            }
            if (eq(u8, key, "nlink")) {
                ctx.stat.nlink = ctx.p.uint(u31);
                if (ctx.stat.etype != .dir and ctx.stat.nlink > 1)
                    ctx.stat.etype = .link;
                return;
            }
            if (eq(u8, key, "notreg")) {
                if (ctx.p.boolean()) ctx.stat.etype = .nonreg;
                return;
            }
        },
        'r' => {
            if (eq(u8, key, "read_error")) {
                if (ctx.p.boolean()) {
                    if (ctx.stat.etype == .dir) ctx.rderr = true
                    else ctx.stat.etype = .err;
                }
                return;
            }
        },
        'u' => {
            if (eq(u8, key, "uid")) {
                ctx.stat.ext.uid = ctx.p.uint(u32);
                ctx.stat.ext.pack.hasuid = true;
                return;
            }
        },
        else => {},
    }
    ctx.p.skip();
}
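
One detail worth spelling out: `dsize` is disk usage in bytes, while `Stat.blocks` counts 512-byte blocks, hence the `>>9`. A trivial sanity check of the conversion (illustrative only, not part of the source):

test "dsize maps to 512-byte blocks" {
    const dsize: u64 = 4096; // disk usage in bytes, as found in a dump
    const blocks = dsize >> 9; // same as dsize / 512
    try std.testing.expectEqual(@as(u64, 8), blocks);
    try std.testing.expectEqual(dsize, blocks * 512);
}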


fn item(ctx: *Ctx, parent: ?*sink.Dir, dev: u64) void {
    ctx.stat = .{ .dev = dev };
    ctx.namelen = 0;
    ctx.rderr = false;
    const isdir = switch (ctx.p.nextChr()) {
        '[' => blk: {
            ctx.p.obj();
            break :blk true;
        },
        '{' => false,
        else => ctx.p.die("expected object or array"),
    };
    if (parent == null and !isdir) ctx.p.die("parent item must be a directory");
    ctx.stat.etype = if (isdir) .dir else .reg;

    var keybuf: [32]u8 = undefined;
    var first = true;
    while (ctx.p.key(first, &keybuf)) |k| {
        first = false;
        itemkey(ctx, k);
    }
    if (ctx.namelen == 0) ctx.p.die("missing \"name\" field");
    const name = (&ctx.namebuf)[0..ctx.namelen];

    if (ctx.stat.etype == .dir) {
        const ndev = ctx.stat.dev;
        const dir =
            if (parent) |d| d.addDir(ctx.sink, name, &ctx.stat)
            else sink.createRoot(name, &ctx.stat);
        ctx.sink.setDir(dir);
        if (ctx.rderr) dir.setReadError(ctx.sink);
        while (ctx.p.elem(false)) item(ctx, dir, ndev);
        ctx.sink.setDir(parent);
        dir.unref(ctx.sink);

    } else {
        if (@intFromEnum(ctx.stat.etype) < 0)
            parent.?.addSpecial(ctx.sink, name, ctx.stat.etype)
        else
            parent.?.addStat(ctx.sink, name, &ctx.stat);
        if (isdir and ctx.p.elem(false)) ctx.p.die("unexpected contents in an excluded directory");
    }

    if ((ctx.sink.files_seen.load(.monotonic) & 65) == 0)
        main.handleEvent(false, false);
}


pub fn import(fd: std.fs.File, head: []const u8) void {
    const sink_threads = sink.createThreads(1);
    defer sink.done();

    var p = Parser{.rd = fd};
    defer if (p.zstd) |z| z.destroy();

    if (head.len >= 4 and std.mem.eql(u8, head[0..4], "\x28\xb5\x2f\xfd")) {
        p.zstd = ZstdReader.create(head);
    } else {
        p.rdsize = head.len;
        @memcpy(p.buf[0..head.len], head);
    }
    p.array();
    if (p.uint(u16) != 1) p.die("incompatible major format version");
    if (!p.elem(false)) p.die("expected array element");
    _ = p.uint(u16); // minor version, ignored for now
    if (!p.elem(false)) p.die("expected array element");

    // metadata object
    p.obj();
    p.skipContent('{');

    // Items
    if (!p.elem(false)) p.die("expected array element");
    var ctx = Ctx{.p = &p, .sink = &sink_threads[0]};
    item(&ctx, null, 0);

    // accept more trailing elements
    while (p.elem(false)) p.skip();
    p.eof();
}
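
For reference, the document shape that import() expects: a top-level array holding the major version (always 1), a minor version, a metadata object (skipped wholesale above), and then the root item, where a directory is an array whose first element describes the directory itself. A small hand-written example (illustrative; real dumps come from `ncdu -o` and may carry more fields):

[1, 2, {"progname":"ncdu","progver":"2.9.2"},
  [{"name":"/tmp","dsize":4096},
    {"name":"a.log","asize":1234,"dsize":4096},
    [{"name":"cache","dsize":4096},
      {"name":"b.dat","asize":10,"dsize":4096}]]]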

src/main.zig (413 changed lines)

@@ -1,21 +1,35 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

pub const program_version = "2.4";
pub const program_version = "2.9.2";

const std = @import("std");
const model = @import("model.zig");
const scan = @import("scan.zig");
const json_import = @import("json_import.zig");
const json_export = @import("json_export.zig");
const bin_export = @import("bin_export.zig");
const bin_reader = @import("bin_reader.zig");
const sink = @import("sink.zig");
const mem_src = @import("mem_src.zig");
const mem_sink = @import("mem_sink.zig");
const ui = @import("ui.zig");
const browser = @import("browser.zig");
const delete = @import("delete.zig");
const util = @import("util.zig");
const exclude = @import("exclude.zig");
const c = @cImport(@cInclude("locale.h"));
const c = @import("c.zig").c;

test "imports" {
    _ = model;
    _ = scan;
    _ = json_import;
    _ = json_export;
    _ = bin_export;
    _ = bin_reader;
    _ = sink;
    _ = mem_src;
    _ = mem_sink;
    _ = ui;
    _ = browser;
    _ = delete;

@@ -27,7 +41,7 @@ test "imports" {
// This allocator never returns an error, it either succeeds or causes ncdu to quit.
// (Which means you'll find a lot of "catch unreachable" sprinkled through the code,
// they look scarier than they are)
fn wrapAlloc(_: *anyopaque, len: usize, ptr_alignment: u8, return_address: usize) ?[*]u8 {
fn wrapAlloc(_: *anyopaque, len: usize, ptr_alignment: std.mem.Alignment, return_address: usize) ?[*]u8 {
    while (true) {
        if (std.heap.c_allocator.vtable.alloc(undefined, len, ptr_alignment, return_address)) |r|
            return r

@@ -42,11 +56,21 @@ pub const allocator = std.mem.Allocator{
        .alloc = wrapAlloc,
        // AFAIK, all uses of resize() to grow an allocation will fall back to alloc() on failure.
        .resize = std.heap.c_allocator.vtable.resize,
        .remap = std.heap.c_allocator.vtable.remap,
        .free = std.heap.c_allocator.vtable.free,
    },
};


// Custom panic impl to reset the terminal before spewing out an error message.
pub const panic = std.debug.FullPanic(struct {
    pub fn panicFn(msg: []const u8, first_trace_addr: ?usize) noreturn {
        @branchHint(.cold);
        ui.deinit();
        std.debug.defaultPanic(msg, first_trace_addr);
    }
}.panicFn);

pub const config = struct {
    pub const SortCol = enum { name, blocks, size, items, mtime };
    pub const SortOrder = enum { asc, desc };

@@ -56,7 +80,10 @@ pub const config = struct {
    pub var follow_symlinks: bool = false;
    pub var exclude_caches: bool = false;
    pub var exclude_kernfs: bool = false;
    pub var exclude_patterns: std.ArrayList([:0]const u8) = std.ArrayList([:0]const u8).init(allocator);
    pub var threads: usize = 1;
    pub var complevel: u8 = 4;
    pub var compress: bool = false;
    pub var export_block_size: ?usize = null;

    pub var update_delay: u64 = 100*std.time.ns_per_ms;
    pub var scan_ui: ?enum { none, line, full } = null;

@@ -79,16 +106,21 @@ pub const config = struct {
    pub var sort_natural: bool = true;

    pub var imported: bool = false;
    pub var binreader: bool = false;
    pub var can_delete: ?bool = null;
    pub var can_shell: ?bool = null;
    pub var can_refresh: ?bool = null;
    pub var confirm_quit: bool = false;
    pub var confirm_delete: bool = true;
    pub var ignore_delete_errors: bool = false;
    pub var delete_command: [:0]const u8 = "";
};

pub var state: enum { scan, browse, refresh, shell, delete } = .scan;

const stdin = if (@hasDecl(std.io, "getStdIn")) std.io.getStdIn() else std.fs.File.stdin();
const stdout = if (@hasDecl(std.io, "getStdOut")) std.io.getStdOut() else std.fs.File.stdout();

// Simple generic argument parser, supports getopt_long() style arguments.
const Args = struct {
    lst: []const [:0]const u8,

@@ -97,6 +129,7 @@ const Args = struct {
    last_arg: ?[:0]const u8 = null, // In the case of --option=<arg>
    shortbuf: [2]u8 = undefined,
    argsep: bool = false,
    ignerror: bool = false,

    const Self = @This();
    const Option = struct {

@@ -126,22 +159,27 @@ const Args = struct {
        return .{ .opt = true, .val = &self.shortbuf };
    }

    pub fn die(self: *const Self, comptime msg: []const u8, args: anytype) !noreturn {
        if (self.ignerror) return error.InvalidArg;
        ui.die(msg, args);
    }

    /// Return the next option or positional argument.
    /// 'opt' indicates whether it's an option or positional argument,
    /// 'val' will be either -x, --something or the argument.
    pub fn next(self: *Self) ?Option {
        if (self.last_arg != null) ui.die("Option '{s}' does not expect an argument.\n", .{ self.last.? });
    pub fn next(self: *Self) !?Option {
        if (self.last_arg != null) try self.die("Option '{s}' does not expect an argument.\n", .{ self.last.? });
        if (self.short) |s| return self.shortopt(s);
        const val = self.pop() orelse return null;
        if (self.argsep or val.len == 0 or val[0] != '-') return Option{ .opt = false, .val = val };
        if (val.len == 1) ui.die("Invalid option '-'.\n", .{});
        if (val.len == 1) try self.die("Invalid option '-'.\n", .{});
        if (val.len == 2 and val[1] == '-') {
            self.argsep = true;
            return self.next();
        }
        if (val[1] == '-') {
            if (std.mem.indexOfScalar(u8, val, '=')) |sep| {
                if (sep == 2) ui.die("Invalid option '{s}'.\n", .{val});
                if (sep == 2) try self.die("Invalid option '{s}'.\n", .{val});
                self.last_arg = val[sep+1.. :0];
                self.last = val[0..sep];
                return Option{ .opt = true, .val = self.last.? };

@@ -153,7 +191,7 @@ const Args = struct {
    }

    /// Returns the argument given to the last returned option. Dies with an error if no argument is provided.
    pub fn arg(self: *Self) [:0]const u8 {
    pub fn arg(self: *Self) ![:0]const u8 {
        if (self.short) |a| {
            defer self.short = null;
            return a;

@@ -163,11 +201,11 @@ const Args = struct {
            return a;
        }
        if (self.pop()) |o| return o;
        ui.die("Option '{s}' requires an argument.\n", .{ self.last.? });
        try self.die("Option '{s}' requires an argument.\n", .{ self.last.? });
    }
};

fn argConfig(args: *Args, opt: Args.Option) bool {
fn argConfig(args: *Args, opt: Args.Option, infile: bool) !void {
    if (opt.is("-q") or opt.is("--slow-ui-updates")) config.update_delay = 2*std.time.ns_per_s
    else if (opt.is("--fast-ui-updates")) config.update_delay = 100*std.time.ns_per_ms
    else if (opt.is("-x") or opt.is("--one-file-system")) config.same_fs = true

@@ -197,13 +235,13 @@ fn argConfig(args: *Args, opt: Args.Option) bool {
    else if (opt.is("--enable-natsort")) config.sort_natural = true
    else if (opt.is("--disable-natsort")) config.sort_natural = false
    else if (opt.is("--graph-style")) {
        const val = args.arg();
        const val = try args.arg();
        if (std.mem.eql(u8, val, "hash")) config.graph_style = .hash
        else if (std.mem.eql(u8, val, "half-block")) config.graph_style = .half
        else if (std.mem.eql(u8, val, "eighth-block") or std.mem.eql(u8, val, "eigth-block")) config.graph_style = .eighth
        else ui.die("Unknown --graph-style option: {s}.\n", .{val});
        else try args.die("Unknown --graph-style option: {s}.\n", .{val});
    } else if (opt.is("--sort")) {
        var val: []const u8 = args.arg();
        var val: []const u8 = try args.arg();
        var ord: ?config.SortOrder = null;
        if (std.mem.endsWith(u8, val, "-asc")) {
            val = val[0..val.len-4];

@@ -227,13 +265,13 @@ fn argConfig(args: *Args, opt: Args.Option) bool {
        } else if (std.mem.eql(u8, val, "mtime")) {
            config.sort_col = .mtime;
            config.sort_order = ord orelse .asc;
        } else ui.die("Unknown --sort option: {s}.\n", .{val});
        } else try args.die("Unknown --sort option: {s}.\n", .{val});
    } else if (opt.is("--shared-column")) {
        const val = args.arg();
        const val = try args.arg();
        if (std.mem.eql(u8, val, "off")) config.show_shared = .off
        else if (std.mem.eql(u8, val, "shared")) config.show_shared = .shared
        else if (std.mem.eql(u8, val, "unique")) config.show_shared = .unique
        else ui.die("Unknown --shared-column option: {s}.\n", .{val});
        else try args.die("Unknown --shared-column option: {s}.\n", .{val});
    } else if (opt.is("--apparent-size")) config.show_blocks = false
    else if (opt.is("--disk-usage")) config.show_blocks = true
    else if (opt.is("-0")) config.scan_ui = .none

@@ -243,26 +281,45 @@ fn argConfig(args: *Args, opt: Args.Option) bool {
    else if (opt.is("--no-si")) config.si = false
    else if (opt.is("-L") or opt.is("--follow-symlinks")) config.follow_symlinks = true
    else if (opt.is("--no-follow-symlinks")) config.follow_symlinks = false
    else if (opt.is("--exclude")) exclude.addPattern(args.arg())
    else if (opt.is("-X") or opt.is("--exclude-from")) {
        const arg = args.arg();
        readExcludeFile(arg) catch |e| ui.die("Error reading excludes from {s}: {s}.\n", .{ arg, ui.errorString(e) });
    else if (opt.is("--exclude")) {
        const arg = if (infile) (util.expanduser(try args.arg(), allocator) catch unreachable) else try args.arg();
        defer if (infile) allocator.free(arg);
        exclude.addPattern(arg);
    } else if (opt.is("-X") or opt.is("--exclude-from")) {
        const arg = if (infile) (util.expanduser(try args.arg(), allocator) catch unreachable) else try args.arg();
        defer if (infile) allocator.free(arg);
        readExcludeFile(arg) catch |e| try args.die("Error reading excludes from {s}: {s}.\n", .{ arg, ui.errorString(e) });
    } else if (opt.is("--exclude-caches")) config.exclude_caches = true
    else if (opt.is("--include-caches")) config.exclude_caches = false
    else if (opt.is("--exclude-kernfs")) config.exclude_kernfs = true
    else if (opt.is("--include-kernfs")) config.exclude_kernfs = false
    else if (opt.is("--confirm-quit")) config.confirm_quit = true
    else if (opt.is("-c") or opt.is("--compress")) config.compress = true
    else if (opt.is("--no-compress")) config.compress = false
    else if (opt.is("--compress-level")) {
        const val = try args.arg();
        const num = std.fmt.parseInt(u8, val, 10) catch try args.die("Invalid number for --compress-level: {s}.\n", .{val});
        if (num <= 0 or num > 20) try args.die("Invalid number for --compress-level: {s}.\n", .{val});
        config.complevel = num;
    } else if (opt.is("--export-block-size")) {
        const val = try args.arg();
        const num = std.fmt.parseInt(u14, val, 10) catch try args.die("Invalid number for --export-block-size: {s}.\n", .{val});
        if (num < 4 or num > 16000) try args.die("Invalid number for --export-block-size: {s}.\n", .{val});
        config.export_block_size = @as(usize, num) * 1024;
    } else if (opt.is("--confirm-quit")) config.confirm_quit = true
    else if (opt.is("--no-confirm-quit")) config.confirm_quit = false
    else if (opt.is("--confirm-delete")) config.confirm_delete = true
    else if (opt.is("--no-confirm-delete")) config.confirm_delete = false
    else if (opt.is("--delete-command")) config.delete_command = allocator.dupeZ(u8, try args.arg()) catch unreachable
    else if (opt.is("--color")) {
        const val = args.arg();
        const val = try args.arg();
        if (std.mem.eql(u8, val, "off")) config.ui_color = .off
        else if (std.mem.eql(u8, val, "dark")) config.ui_color = .dark
        else if (std.mem.eql(u8, val, "dark-bg")) config.ui_color = .darkbg
        else ui.die("Unknown --color option: {s}.\n", .{val});
    } else return false;
    return true;
        else try args.die("Unknown --color option: {s}.\n", .{val});
    } else if (opt.is("-t") or opt.is("--threads")) {
        const val = try args.arg();
        config.threads = std.fmt.parseInt(u8, val, 10) catch try args.die("Invalid number of --threads: {s}.\n", .{val});
    } else return error.UnknownOption;
}

fn tryReadArgsFile(path: [:0]const u8) void {

@@ -273,157 +330,142 @@ fn tryReadArgsFile(path: [:0]const u8) void {
    };
    defer f.close();

    var arglist = std.ArrayList([:0]const u8).init(allocator);

    var rd_ = std.io.bufferedReader(f.reader());
    const rd = rd_.reader();

    var line_buf: [4096]u8 = undefined;
    var line_fbs = std.io.fixedBufferStream(&line_buf);
    const line_writer = line_fbs.writer();
    var line_rd = util.LineReader.init(f, &line_buf);

    while (true) : (line_fbs.reset()) {
        rd.streamUntilDelimiter(line_writer, '\n', line_buf.len) catch |err| switch (err) {
            error.EndOfStream => if (line_fbs.getPos() catch unreachable == 0) break,
            else => |e| ui.die("Error reading from {s}: {s}\nRun with --ignore-config to skip reading config files.\n", .{ path, ui.errorString(e) }),
        };
        const line_ = line_fbs.getWritten();
    while (true) {
        const line_ = (line_rd.read() catch |e|
            ui.die("Error reading from {s}: {s}\nRun with --ignore-config to skip reading config files.\n", .{ path, ui.errorString(e) })
        ) orelse break;

        var argc: usize = 0;
        var ignerror = false;
        var arglist: [2][:0]const u8 = .{ "", "" };

        var line = std.mem.trim(u8, line_, &std.ascii.whitespace);
        if (line.len > 0 and line[0] == '@') {
            ignerror = true;
            line = line[1..];
        }
        if (line.len == 0 or line[0] == '#') continue;
        if (std.mem.indexOfAny(u8, line, " \t=")) |i| {
            arglist.append(allocator.dupeZ(u8, line[0..i]) catch unreachable) catch unreachable;
            arglist[argc] = allocator.dupeZ(u8, line[0..i]) catch unreachable;
            argc += 1;
            line = std.mem.trimLeft(u8, line[i+1..], &std.ascii.whitespace);
        }
        arglist.append(allocator.dupeZ(u8, line) catch unreachable) catch unreachable;
    }
        arglist[argc] = allocator.dupeZ(u8, line) catch unreachable;
        argc += 1;

        var args = Args.init(arglist.items);
        while (args.next()) |opt| {
            if (!argConfig(&args, opt))
        var args = Args.init(arglist[0..argc]);
        args.ignerror = ignerror;
        while (args.next() catch null) |opt| {
            if (argConfig(&args, opt, true)) |_| {}
            else |_| {
                if (ignerror) break;
                ui.die("Unrecognized option in config file '{s}': {s}.\nRun with --ignore-config to skip reading config files.\n", .{path, opt.val});
            }
        for (arglist.items) |i| allocator.free(i);
        arglist.deinit();
        }
        allocator.free(arglist[0]);
        if (argc == 2) allocator.free(arglist[1]);
    }
}

fn version() noreturn {
    const stdout = std.io.getStdOut();
    stdout.writeAll("ncdu " ++ program_version ++ "\n") catch {};
    std.process.exit(0);
}

fn help() noreturn {
    const stdout = std.io.getStdOut();
    stdout.writeAll(
    \\ncdu <options> <directory>
    \\
    \\Options:
    \\  -h,--help                  This help message
    \\  -q                         Quiet mode, refresh interval 2 seconds
    \\  -v,-V,--version            Print version
    \\  -x                         Same filesystem
    \\  -e                         Enable extended information
    \\  -r                         Read only
    \\  -o FILE                    Export scanned directory to FILE
    \\Mode selection:
    \\  -h, --help                 This help message
    \\  -v, -V, --version          Print version
    \\  -f FILE                    Import scanned directory from FILE
    \\  -0,-1,-2                   UI to use when scanning (0=none,2=full ncurses)
    \\  --si                       Use base 10 (SI) prefixes instead of base 2
    \\  --exclude PATTERN          Exclude files that match PATTERN
    \\  -X, --exclude-from FILE    Exclude files that match any pattern in FILE
    \\  -L, --follow-symlinks      Follow symbolic links (excluding directories)
    \\  --exclude-caches           Exclude directories containing CACHEDIR.TAG
    \\  --exclude-kernfs           Exclude Linux pseudo filesystems (procfs,sysfs,cgroup,...)
    \\  --confirm-quit             Confirm quitting ncdu
    \\  --color SCHEME             Set color scheme (off/dark/dark-bg)
    \\  -o FILE                    Export scanned directory to FILE in JSON format
    \\  -O FILE                    Export scanned directory to FILE in binary format
    \\  -e, --extended             Enable extended information
    \\  --ignore-config            Don't load config files
    \\
    \\Refer to `man ncdu` for the full list of options.
    \\Scan options:
    \\  -x, --one-file-system      Stay on the same filesystem
    \\  --exclude PATTERN          Exclude files that match PATTERN
    \\  -X, --exclude-from FILE    Exclude files that match any pattern in FILE
    \\  --exclude-caches           Exclude directories containing CACHEDIR.TAG
    \\  -L, --follow-symlinks      Follow symbolic links (excluding directories)
    \\  --exclude-kernfs           Exclude Linux pseudo filesystems (procfs,sysfs,cgroup,...)
    \\  -t NUM                     Scan with NUM threads
    \\
    \\Export options:
    \\  -c, --compress             Use Zstandard compression with `-o`
    \\  --compress-level NUM       Set compression level
    \\  --export-block-size KIB    Set export block size with `-O`
    \\
    \\Interface options:
    \\  -0, -1, -2                 UI to use when scanning (0=none,2=full ncurses)
    \\  -q, --slow-ui-updates      "Quiet" mode, refresh interval 2 seconds
    \\  --enable-shell             Enable/disable shell spawning feature
    \\  --enable-delete            Enable/disable file deletion feature
    \\  --enable-refresh           Enable/disable directory refresh feature
    \\  -r                         Read only (--disable-delete)
    \\  -rr                        Read only++ (--disable-delete & --disable-shell)
    \\  --si                       Use base 10 (SI) prefixes instead of base 2
    \\  --apparent-size            Show apparent size instead of disk usage by default
    \\  --hide-hidden              Hide "hidden" or excluded files by default
    \\  --show-itemcount           Show item count column by default
    \\  --show-mtime               Show mtime column by default (requires `-e`)
    \\  --show-graph               Show graph column by default
    \\  --show-percent             Show percent column by default
    \\  --graph-style STYLE        hash / half-block / eighth-block
    \\  --shared-column            off / shared / unique
    \\  --sort COLUMN-(asc/desc)   disk-usage / name / apparent-size / itemcount / mtime
    \\  --enable-natsort           Use natural order when sorting by name
    \\  --group-directories-first  Sort directories before files
    \\  --confirm-quit             Ask confirmation before quitting ncdu
    \\  --no-confirm-delete        Don't ask confirmation before deletion
    \\  --delete-command CMD       Command to run for file deletion
    \\  --color SCHEME             off / dark / dark-bg
    \\
    \\Refer to `man ncdu` for more information.
    \\
    ) catch {};
    std.process.exit(0);
}


fn spawnShell() void {
    ui.deinit();
    defer ui.init();

    var path = std.ArrayList(u8).init(allocator);
    defer path.deinit();
    browser.dir_parent.fmtPath(true, &path);

    var env = std.process.getEnvMap(allocator) catch unreachable;
    defer env.deinit();
    // NCDU_LEVEL can only count to 9, keeps the implementation simple.
    if (env.get("NCDU_LEVEL")) |l|
        env.put("NCDU_LEVEL", if (l.len == 0) "1" else switch (l[0]) {
            '0'...'8' => |d| &[1]u8{d+1},
            '9' => "9",
            else => "1"
        }) catch unreachable
    else
        env.put("NCDU_LEVEL", "1") catch unreachable;

    const shell = std.posix.getenvZ("NCDU_SHELL") orelse std.posix.getenvZ("SHELL") orelse "/bin/sh";
    var child = std.process.Child.init(&.{shell}, allocator);
    child.cwd = path.items;
    child.env_map = &env;

    const stdin = std.io.getStdIn();
    const stderr = std.io.getStdErr();
    const term = child.spawnAndWait() catch |e| blk: {
        stderr.writer().print(
            "Error spawning shell: {s}\n\nPress enter to continue.\n",
            .{ ui.errorString(e) }
        ) catch {};
        stdin.reader().skipUntilDelimiterOrEof('\n') catch unreachable;
        break :blk std.process.Child.Term{ .Exited = 0 };
    };
    if (term != .Exited) {
        const n = switch (term) {
            .Exited => "status",
            .Signal => "signal",
            .Stopped => "stopped",
            .Unknown => "unknown",
        };
        const v = switch (term) {
            .Exited => |v| v,
            .Signal => |v| v,
            .Stopped => |v| v,
            .Unknown => |v| v,
        };
        stderr.writer().print(
            "Shell returned with {s} code {}.\n\nPress enter to continue.\n", .{ n, v }
        ) catch {};
        stdin.reader().skipUntilDelimiterOrEof('\n') catch unreachable;
    }
}


fn readExcludeFile(path: [:0]const u8) !void {
    const f = try std.fs.cwd().openFileZ(path, .{});
    defer f.close();

    var rd_ = std.io.bufferedReader(f.reader());
    const rd = rd_.reader();

    var line_buf: [4096]u8 = undefined;
    var line_fbs = std.io.fixedBufferStream(&line_buf);
    const line_writer = line_fbs.writer();

    while (true) : (line_fbs.reset()) {
        rd.streamUntilDelimiter(line_writer, '\n', line_buf.len) catch |err| switch (err) {
            error.EndOfStream => if (line_fbs.getPos() catch unreachable == 0) break,
            else => |e| return e,
        };
        const line = line_fbs.getWritten();
    var line_rd = util.LineReader.init(f, &line_buf);
    while (try line_rd.read()) |line| {
        if (line.len > 0)
            exclude.addPattern(line);
    }
}

fn readImport(path: [:0]const u8) !void {
    const fd =
        if (std.mem.eql(u8, "-", path)) stdin
        else try std.fs.cwd().openFileZ(path, .{});
    errdefer fd.close();

    var buf: [8]u8 = undefined;
    if (8 != try fd.readAll(&buf)) return error.EndOfStream;
    if (std.mem.eql(u8, &buf, bin_export.SIGNATURE)) {
        try bin_reader.open(fd);
        config.binreader = true;
    } else {
        json_import.import(fd, &buf);
        fd.close();
    }
}

pub fn main() void {
    ui.main_thread = std.Thread.getCurrentId();

    // Grab thousands_sep from the current C locale.
    _ = c.setlocale(c.LC_ALL, "");
    if (c.localeconv()) |locale| {

@@ -456,16 +498,17 @@ pub fn main() void {
        }
    }

    var scan_dir: ?[]const u8 = null;
    var scan_dir: ?[:0]const u8 = null;
    var import_file: ?[:0]const u8 = null;
    var export_file: ?[:0]const u8 = null;
    var export_json: ?[:0]const u8 = null;
    var export_bin: ?[:0]const u8 = null;
    var quit_after_scan = false;
    {
        const arglist = std.process.argsAlloc(allocator) catch unreachable;
        defer std.process.argsFree(allocator, arglist);
        var args = Args.init(arglist);
        _ = args.next(); // program name
        while (args.next()) |opt| {
        _ = args.next() catch unreachable; // program name
        while (args.next() catch unreachable) |opt| {
            if (!opt.opt) {
                // XXX: ncdu 1.x doesn't error, it just silently ignores all but the last argument.
                if (scan_dir != null) ui.die("Multiple directories given, see ncdu -h for help.\n", .{});

@@ -474,49 +517,68 @@ pub fn main() void {
            }
            if (opt.is("-h") or opt.is("-?") or opt.is("--help")) help()
            else if (opt.is("-v") or opt.is("-V") or opt.is("--version")) version()
            else if (opt.is("-o") and export_file != null) ui.die("The -o flag can only be given once.\n", .{})
            else if (opt.is("-o")) export_file = allocator.dupeZ(u8, args.arg()) catch unreachable
            else if (opt.is("-o") and (export_json != null or export_bin != null)) ui.die("The -o flag can only be given once.\n", .{})
            else if (opt.is("-o")) export_json = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
            else if (opt.is("-O") and (export_json != null or export_bin != null)) ui.die("The -O flag can only be given once.\n", .{})
            else if (opt.is("-O")) export_bin = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
            else if (opt.is("-f") and import_file != null) ui.die("The -f flag can only be given once.\n", .{})
            else if (opt.is("-f")) import_file = allocator.dupeZ(u8, args.arg()) catch unreachable
            else if (opt.is("-f")) import_file = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
            else if (opt.is("--ignore-config")) {}
            else if (opt.is("--quit-after-scan")) quit_after_scan = true // undocumented feature to help with benchmarking scan/import
            else if (argConfig(&args, opt)) {}
            else ui.die("Unrecognized option '{s}'.\n", .{opt.val});
            else if (argConfig(&args, opt, false)) |_| {}
            else |_| ui.die("Unrecognized option '{s}'.\n", .{opt.val});
        }
    }

    if (config.threads == 0) config.threads = std.Thread.getCpuCount() catch 1;

    if (@import("builtin").os.tag != .linux and config.exclude_kernfs)
        ui.die("The --exclude-kernfs flag is currently only supported on Linux.\n", .{});

    const stdin = std.io.getStdIn();
    const stdout = std.io.getStdOut();
    const out_tty = stdout.isTty();
    const in_tty = stdin.isTty();
    if (config.scan_ui == null) {
        if (export_file) |f| {
        if (export_json orelse export_bin) |f| {
            if (!out_tty or std.mem.eql(u8, f, "-")) config.scan_ui = .none
            else config.scan_ui = .line;
        } else config.scan_ui = .full;
    }
    if (!in_tty and import_file == null and export_file == null)
    if (!in_tty and import_file == null and export_json == null and export_bin == null and !quit_after_scan)
        ui.die("Standard input is not a TTY. Did you mean to import a file using '-f -'?\n", .{});
    config.nc_tty = !in_tty or (if (export_file) |f| std.mem.eql(u8, f, "-") else false);
    config.nc_tty = !in_tty or (if (export_json orelse export_bin) |f| std.mem.eql(u8, f, "-") else false);

    event_delay_timer = std.time.Timer.start() catch unreachable;
    defer ui.deinit();

    const out_file = if (export_file) |f| (
    if (export_json) |f| {
        const file =
            if (std.mem.eql(u8, f, "-")) stdout
            else std.fs.cwd().createFileZ(f, .{})
            catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)})
    ) else null;
            catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)});
        json_export.setupOutput(file);
        sink.global.sink = .json;
    } else if (export_bin) |f| {
        const file =
            if (std.mem.eql(u8, f, "-")) stdout
            else std.fs.cwd().createFileZ(f, .{})
            catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)});
        bin_export.setupOutput(file);
        sink.global.sink = .bin;
    }

    if (import_file) |f| {
        scan.importRoot(f, out_file);
        readImport(f) catch |e| ui.die("Error reading file '{s}': {s}.\n", .{f, ui.errorString(e)});
        config.imported = true;
    } else scan.scanRoot(scan_dir orelse ".", out_file)
        catch |e| ui.die("Error opening directory: {s}.\n", .{ui.errorString(e)});
    if (quit_after_scan or out_file != null) return;
        if (config.binreader and (export_json != null or export_bin != null))
            bin_reader.import();
    } else {
        var buf: [std.fs.max_path_bytes+1]u8 = @splat(0);
        const path =
            if (std.posix.realpathZ(scan_dir orelse ".", buf[0..buf.len-1])) |p| buf[0..p.len:0]
            else |_| (scan_dir orelse ".");
        scan.scan(path) catch |e| ui.die("Error opening directory: {s}.\n", .{ui.errorString(e)});
    }
    if (quit_after_scan or export_json != null or export_bin != null) return;

    config.can_shell = config.can_shell orelse !config.imported;
    config.can_delete = config.can_delete orelse !config.imported;

@@ -525,44 +587,57 @@ pub fn main() void {
    config.scan_ui = .full; // in case we're refreshing from the UI, always in full mode.
    ui.init();
    state = .browse;
    browser.dir_parent = model.root;
    browser.loadDir(null);
    browser.initRoot();

    while (true) {
        switch (state) {
            .refresh => {
                scan.scan();
                var full_path: std.ArrayListUnmanaged(u8) = .empty;
                defer full_path.deinit(allocator);
                mem_sink.global.root.?.fmtPath(allocator, true, &full_path);
                scan.scan(util.arrayListBufZ(&full_path, allocator)) catch {
                    sink.global.last_error = allocator.dupeZ(u8, full_path.items) catch unreachable;
                    sink.global.state = .err;
                    while (state == .refresh) handleEvent(true, true);
                };
                state = .browse;
                browser.loadDir(null);
                browser.loadDir(0);
            },
            .shell => {
                spawnShell();
                const shell = std.posix.getenvZ("NCDU_SHELL") orelse std.posix.getenvZ("SHELL") orelse "/bin/sh";
                var env = std.process.getEnvMap(allocator) catch unreachable;
                defer env.deinit();
                ui.runCmd(&.{shell}, browser.dir_path, &env, false);
                state = .browse;
            },
            .delete => {
                const next = delete.delete();
                if (state != .refresh) {
                    state = .browse;
                    browser.loadDir(next);
                    browser.loadDir(if (next) |n| n.nameHash() else 0);
                }
            },
            else => handleEvent(true, false)
        }
    }
}

var event_delay_timer: std.time.Timer = undefined;
pub var event_delay_timer: std.time.Timer = undefined;

// Draw the screen and handle the next input event.
// In non-blocking mode, screen drawing is rate-limited to keep this function fast.
pub fn handleEvent(block: bool, force_draw: bool) void {
    while (ui.oom_threads.load(.monotonic) > 0) ui.oom();

    if (block or force_draw or event_delay_timer.read() > config.update_delay) {
        if (ui.inited) _ = ui.c.erase();
        if (ui.inited) _ = c.erase();
        switch (state) {
            .scan, .refresh => scan.draw(),
            .scan, .refresh => sink.draw(),
            .browse => browser.draw(),
            .delete => delete.draw(),
            .shell => unreachable,
        }
        if (ui.inited) _ = ui.c.refresh();
        if (ui.inited) _ = c.refresh();
        event_delay_timer.reset();
    }
    if (!ui.inited) {

@@ -576,7 +651,7 @@ pub fn handleEvent(block: bool, force_draw: bool) void {
    if (ch == 0) return;
    if (ch == -1) return handleEvent(firstblock, true);
    switch (state) {
        .scan, .refresh => scan.keyInput(ch),
        .scan, .refresh => sink.keyInput(ch),
        .browse => browser.keyInput(ch),
        .delete => delete.keyInput(ch),
        .shell => unreachable,

@@ -590,13 +665,13 @@ test "argument parser" {
    const T = struct {
        a: Args,
        fn opt(self: *@This(), isopt: bool, val: []const u8) !void {
            const o = self.a.next().?;
            const o = (self.a.next() catch unreachable).?;
            try std.testing.expectEqual(isopt, o.opt);
            try std.testing.expectEqualStrings(val, o.val);
            try std.testing.expectEqual(o.is(val), isopt);
        }
        fn arg(self: *@This(), val: []const u8) !void {
            try std.testing.expectEqualStrings(val, self.a.arg());
            try std.testing.expectEqualStrings(val, self.a.arg() catch unreachable);
        }
    };
    var t = T{ .a = Args.init(&lst) };

src/mem_sink.zig (new file, 212 lines)

@@ -0,0 +1,212 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const sink = @import("sink.zig");


pub const global = struct {
    pub var root: ?*model.Dir = null;
    pub var stats: bool = true; // calculate aggregate directory stats
};

pub const Thread = struct {
    // Arena allocator for model.Entry structs, these are never freed.
    arena: std.heap.ArenaAllocator = std.heap.ArenaAllocator.init(std.heap.page_allocator),
};

pub fn statToEntry(stat: *const sink.Stat, e: *model.Entry, parent: *model.Dir) void {
    e.pack.blocks = stat.blocks;
    e.size = stat.size;
    if (e.dir()) |d| {
        d.parent = parent;
        d.pack.dev = model.devices.getId(stat.dev);
    }
    if (e.link()) |l| {
        l.parent = parent;
        l.ino = stat.ino;
        l.pack.nlink = stat.nlink;
        model.inodes.lock.lock();
        defer model.inodes.lock.unlock();
        l.addLink();
    }
    if (e.ext()) |ext| ext.* = stat.ext;
}

pub const Dir = struct {
    dir: *model.Dir,
    entries: Map,

    own_blocks: model.Blocks,
    own_bytes: u64,

    // Additional counts collected from subdirectories. Subdirs may run final()
    // from separate threads so these need to be protected.
    blocks: model.Blocks = 0,
    bytes: u64 = 0,
    items: u32 = 0,
    mtime: u64 = 0,
    suberr: bool = false,
    lock: std.Thread.Mutex = .{},

    const Map = std.HashMap(*model.Entry, void, HashContext, 80);

    const HashContext = struct {
        pub fn hash(_: @This(), e: *model.Entry) u64 {
            return std.hash.Wyhash.hash(0, e.name());
        }
        pub fn eql(_: @This(), a: *model.Entry, b: *model.Entry) bool {
            return a == b or std.mem.eql(u8, a.name(), b.name());
        }
    };

    const HashContextAdapted = struct {
        pub fn hash(_: @This(), v: []const u8) u64 {
            return std.hash.Wyhash.hash(0, v);
        }
        pub fn eql(_: @This(), a: []const u8, b: *model.Entry) bool {
            return std.mem.eql(u8, a, b.name());
        }
    };

    fn init(dir: *model.Dir) Dir {
        var self = Dir{
            .dir = dir,
            .entries = Map.initContext(main.allocator, HashContext{}),
            .own_blocks = dir.entry.pack.blocks,
            .own_bytes = dir.entry.size,
        };

        var count: Map.Size = 0;
        var it = dir.sub.ptr;
        while (it) |e| : (it = e.next.ptr) count += 1;
        self.entries.ensureUnusedCapacity(count) catch unreachable;

        it = dir.sub.ptr;
        while (it) |e| : (it = e.next.ptr)
            self.entries.putAssumeCapacity(e, {});
        return self;
    }

    fn getEntry(self: *Dir, t: *Thread, etype: model.EType, isext: bool, name: []const u8) *model.Entry {
        if (self.entries.getKeyAdapted(name, HashContextAdapted{})) |e| {
            // XXX: In-place conversion may be possible in some cases.
            if (e.pack.etype.base() == etype.base() and (!isext or e.pack.isext)) {
                e.pack.etype = etype;
                e.pack.isext = isext;
                _ = self.entries.removeAdapted(name, HashContextAdapted{});
                return e;
            }
        }
        const e = model.Entry.create(t.arena.allocator(), etype, isext, name);
        e.next.ptr = self.dir.sub.ptr;
        self.dir.sub.ptr = e;
        return e;
    }
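
    // Editor's note: getEntry() above leans on the map's *Adapted* API so it
    // can probe with a plain name slice instead of first building a throwaway
    // *model.Entry; the only requirement is that the adapted context hashes
    // exactly like the primary one. The same pattern in isolation
    // (hypothetical `User` type, not ncdu code; reuses the file's std import):
    const User = struct { name: []const u8, id: u32 };

    const UserCtx = struct {
        pub fn hash(_: @This(), u: *const User) u64 {
            return std.hash.Wyhash.hash(0, u.name);
        }
        pub fn eql(_: @This(), a: *const User, b: *const User) bool {
            return std.mem.eql(u8, a.name, b.name);
        }
    };

    const UserCtxAdapted = struct {
        pub fn hash(_: @This(), name: []const u8) u64 {
            return std.hash.Wyhash.hash(0, name); // must match UserCtx.hash
        }
        pub fn eql(_: @This(), name: []const u8, u: *const User) bool {
            return std.mem.eql(u8, name, u.name);
        }
    };

    test "adapted hash map lookup" {
        var map = std.HashMap(*const User, void, UserCtx, 80).initContext(std.testing.allocator, .{});
        defer map.deinit();
        const alice = User{ .name = "alice", .id = 1 };
        try map.put(&alice, {});
        // Probe by name alone; no temporary User is constructed.
        try std.testing.expect(map.getKeyAdapted(@as([]const u8, "alice"), UserCtxAdapted{}) != null);
    }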

    pub fn addSpecial(self: *Dir, t: *Thread, name: []const u8, st: model.EType) void {
        self.dir.items += 1;
        if (st == .err) self.dir.pack.suberr = true;
        _ = self.getEntry(t, st, false, name);
    }

    pub fn addStat(self: *Dir, t: *Thread, name: []const u8, stat: *const sink.Stat) *model.Entry {
        if (global.stats) {
            self.dir.items +|= 1;
            if (stat.etype != .link) {
                self.dir.entry.pack.blocks +|= stat.blocks;
                self.dir.entry.size +|= stat.size;
            }
            if (self.dir.entry.ext()) |e| {
                if (stat.ext.mtime > e.mtime) e.mtime = stat.ext.mtime;
            }
        }

        const e = self.getEntry(t, stat.etype, main.config.extended and !stat.ext.isEmpty(), name);
        statToEntry(stat, e, self.dir);
        return e;
    }

    pub fn addDir(self: *Dir, t: *Thread, name: []const u8, stat: *const sink.Stat) Dir {
        return init(self.addStat(t, name, stat).dir().?);
    }

    pub fn setReadError(self: *Dir) void {
        self.dir.pack.err = true;
    }

    pub fn final(self: *Dir, parent: ?*Dir) void {
        // Remove entries we've not seen
        if (self.entries.count() > 0) {
            var it = &self.dir.sub.ptr;
            while (it.*) |e| {
                if (self.entries.getKey(e) == e) it.* = e.next.ptr
                else it = &e.next.ptr;
            }
        }
        self.entries.deinit();

        if (!global.stats) return;

        // Grab counts collected from subdirectories
        self.dir.entry.pack.blocks +|= self.blocks;
        self.dir.entry.size +|= self.bytes;
        self.dir.items +|= self.items;
        if (self.suberr) self.dir.pack.suberr = true;
        if (self.dir.entry.ext()) |e| {
            if (self.mtime > e.mtime) e.mtime = self.mtime;
        }

        // Add own counts to parent
        if (parent) |p| {
            p.lock.lock();
            defer p.lock.unlock();
            p.blocks +|= self.dir.entry.pack.blocks - self.own_blocks;
            p.bytes +|= self.dir.entry.size - self.own_bytes;
            p.items +|= self.dir.items;
            if (self.dir.entry.ext()) |e| {
                if (e.mtime > p.mtime) p.mtime = e.mtime;
            }
            if (self.suberr or self.dir.pack.suberr or self.dir.pack.err) p.suberr = true;
        }
    }
};

pub fn createRoot(path: []const u8, stat: *const sink.Stat) Dir {
    const p = global.root orelse blk: {
        model.root = model.Entry.create(main.allocator, .dir, main.config.extended and !stat.ext.isEmpty(), path).dir().?;
        break :blk model.root;
    };
    sink.global.state = .zeroing;
    if (p.items > 10_000) main.handleEvent(false, true);
    // Do the zeroStats() here, after the "root" entry has been
    // stat'ed and opened, so that a fatal error on refresh won't
    // zero-out the requested directory.
    p.entry.zeroStats(p.parent);
    sink.global.state = .running;
    p.entry.pack.blocks = stat.blocks;
    p.entry.size = stat.size;
    p.pack.dev = model.devices.getId(stat.dev);
    if (p.entry.ext()) |e| e.* = stat.ext;
    return Dir.init(p);
}

pub fn done() void {
    if (!global.stats) return;

    sink.global.state = .hlcnt;
    main.handleEvent(false, true);
    const dir = global.root orelse model.root;
    var it: ?*model.Dir = dir;
    while (it) |p| : (it = p.parent) {
        p.updateSubErr();
        if (p != dir) {
            p.entry.pack.blocks +|= dir.entry.pack.blocks;
            p.entry.size +|= dir.entry.size;
            p.items +|= dir.items + 1;
        }
    }
    model.inodes.addAllStats();
}

src/mem_src.zig (new file, 73 lines)

@@ -0,0 +1,73 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const sink = @import("sink.zig");

// Emit the memory tree to the sink in depth-first order from a single thread,
// suitable for JSON export.

fn toStat(e: *model.Entry) sink.Stat {
    const el = e.link();
    return sink.Stat{
        .etype = e.pack.etype,
        .blocks = e.pack.blocks,
        .size = e.size,
        .dev =
            if (e.dir()) |d| model.devices.list.items[d.pack.dev]
            else if (el) |l| model.devices.list.items[l.parent.pack.dev]
            else undefined,
        .ino = if (el) |l| l.ino else undefined,
        .nlink = if (el) |l| l.pack.nlink else 1,
        .ext = if (e.ext()) |x| x.* else .{},
    };
}

const Ctx = struct {
    sink: *sink.Thread,
    stat: sink.Stat,
};


fn rec(ctx: *Ctx, dir: *sink.Dir, entry: *model.Entry) void {
    if ((ctx.sink.files_seen.load(.monotonic) & 65) == 0)
        main.handleEvent(false, false);

    ctx.stat = toStat(entry);
    switch (entry.pack.etype) {
        .dir => {
            const d = entry.dir().?;
            var ndir = dir.addDir(ctx.sink, entry.name(), &ctx.stat);
            ctx.sink.setDir(ndir);
            if (d.pack.err) ndir.setReadError(ctx.sink);
            var it = d.sub.ptr;
            while (it) |e| : (it = e.next.ptr) rec(ctx, ndir, e);
            ctx.sink.setDir(dir);
            ndir.unref(ctx.sink);
        },
        .reg, .nonreg, .link => dir.addStat(ctx.sink, entry.name(), &ctx.stat),
        else => dir.addSpecial(ctx.sink, entry.name(), entry.pack.etype),
    }
}


pub fn run(d: *model.Dir) void {
    const sink_threads = sink.createThreads(1);

    var ctx: Ctx = .{
        .sink = &sink_threads[0],
        .stat = toStat(&d.entry),
    };
    var buf: std.ArrayListUnmanaged(u8) = .empty;
    d.fmtPath(main.allocator, true, &buf);
    const root = sink.createRoot(buf.items, &ctx.stat);
    buf.deinit(main.allocator);

    var it = d.sub.ptr;
    while (it) |e| : (it = e.next.ptr) rec(&ctx, root, e);

    root.unref(ctx.sink);
    sink.done();
}
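
run() is the bridge that lets an already-loaded in-memory tree be replayed through the sink layer, e.g. to re-export it. A hypothetical call site, assuming the caller has configured a sink beforehand (illustration only, not in the source):

fn exportLoadedTree() void {
    // Hypothetical helper: replay the tree rooted at model.root through
    // whichever sink (JSON or binary) the caller configured.
    run(model.root);
}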

src/model.zig (424 changed lines)

@@ -6,20 +6,51 @@ const main = @import("main.zig");
const ui = @import("ui.zig");
const util = @import("util.zig");

// While an arena allocator is optimal for almost all scenarios in which ncdu
// is used, it doesn't allow for re-using deleted nodes after doing a delete or
// refresh operation, so a long-running ncdu session with regular refreshes
// will leak memory, but I'd say that's worth the efficiency gains.
// TODO: Can still implement a simple bucketed free list on top of this arena
// allocator to reuse nodes, if necessary.
var allocator_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = allocator_state.allocator();
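
That TODO is concrete enough to sketch: keep the arena, but intercept frees into per-size-class lists and let allocation check the matching list first. An untested illustration of the idea (not ncdu code; assumes the file's `std` import and byte-aligned blocks, as Entry allocations are):

const FreeNode = extern struct { next: usize align(1) };

const BucketedArena = struct {
    arena: std.heap.ArenaAllocator,
    // One singly-linked free list per 8-byte size class; Entry allocations
    // are small and repetitive, so a tiny fixed table suffices. 0 = empty.
    buckets: [64]usize = @splat(0),

    fn alloc(self: *BucketedArena, size: usize) ![]u8 {
        const class = (size + 7) / 8; // pad to the class size so reuse is safe
        if (class < self.buckets.len and self.buckets[class] != 0) {
            const ptr: [*]u8 = @ptrFromInt(self.buckets[class]);
            const node: *align(1) const FreeNode = @ptrCast(ptr);
            self.buckets[class] = node.next;
            return ptr[0..size];
        }
        const mem = try self.arena.allocator().alloc(u8, class * 8);
        return mem[0..size];
    }

    fn free(self: *BucketedArena, mem: []u8) void {
        const class = (mem.len + 7) / 8; // same class as at allocation time
        // Blocks outside the table (or too small to hold a link) leak, as today.
        if (class >= self.buckets.len or class * 8 < @sizeOf(FreeNode)) return;
        const node: *align(1) FreeNode = @ptrCast(mem.ptr);
        node.* = .{ .next = self.buckets[class] };
        self.buckets[class] = @intFromPtr(mem.ptr);
    }
};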
|
||||
// Numbers are used in the binfmt export, so must be stable.
|
||||
pub const EType = enum(i3) {
|
||||
dir = 0,
|
||||
reg = 1,
|
||||
nonreg = 2,
|
||||
link = 3,
|
||||
err = -1,
|
||||
pattern = -2,
|
||||
otherfs = -3,
|
||||
kernfs = -4,
|
||||
|
||||
pub const EType = enum(u2) { dir, link, file };
|
||||
pub fn base(t: EType) EType {
|
||||
return switch (t) {
|
||||
.dir, .link => t,
|
||||
else => .reg,
|
||||
};
|
||||
}
|
||||
|
||||
// Whether this entry should be displayed as a "directory".
|
||||
// Some dirs are actually represented in this data model as a File for efficiency.
|
||||
pub fn isDirectory(t: EType) bool {
|
||||
return switch (t) {
|
||||
.dir, .otherfs, .kernfs => true,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
// Type for the Entry.Packed.blocks field. Smaller than a u64 to make room for flags.
|
||||
pub const Blocks = u60;
|
||||
|
||||
// Entries read from bin_reader may refer to other entries by itemref rather than pointer.
|
||||
// This is a hack that allows browser.zig to use the same types for in-memory
|
||||
// and bin_reader-backed directory trees. Most code can only deal with
|
||||
// in-memory trees and accesses the .ptr field directly.
|
||||
pub const Ref = extern union {
|
||||
ptr: ?*Entry align(1),
|
||||
ref: u64 align(1),
|
||||
|
||||
pub fn isNull(r: Ref) bool {
|
||||
if (main.config.binreader) return r.ref == std.math.maxInt(u64)
|
||||
else return r.ptr == null;
|
||||
}
|
||||
};
|
||||
|
||||
// Memory layout:
|
||||
// (Ext +) Dir + name
|
||||
// or: (Ext +) Link + name
|
||||
|
|
@ -34,16 +65,11 @@ pub const Blocks = u60;
|
|||
pub const Entry = extern struct {
|
||||
pack: Packed align(1),
|
||||
size: u64 align(1) = 0,
|
||||
next: ?*Entry align(1) = null,
|
||||
next: Ref = .{ .ptr = null },
|
||||
|
||||
pub const Packed = packed struct(u64) {
|
||||
etype: EType,
|
||||
isext: bool,
|
||||
// Whether or not this entry's size has been counted in its parents.
|
||||
// Counting of Link entries is deferred until the scan/delete operation has
|
||||
// completed, so for those entries this flag indicates an intention to be
|
||||
// counted.
|
||||
counted: bool = false,
|
||||
blocks: Blocks = 0, // 512-byte blocks
|
||||
};
|
||||
|
||||
|
|

@@ -58,34 +84,33 @@
    }

    pub fn file(self: *Self) ?*File {
        return if (self.pack.etype == .file) @ptrCast(self) else null;
    }

    // Whether this entry should be displayed as a "directory".
    // Some dirs are actually represented in this data model as a File for efficiency.
    pub fn isDirectory(self: *Self) bool {
        return if (self.file()) |f| f.pack.other_fs or f.pack.kernfs else self.pack.etype == .dir;
        return if (self.pack.etype != .dir and self.pack.etype != .link) @ptrCast(self) else null;
    }

    pub fn name(self: *const Self) [:0]const u8 {
        const self_name = switch (self.pack.etype) {
            .dir => &@as(*const Dir, @ptrCast(self)).name,
            .link => &@as(*const Link, @ptrCast(self)).name,
            .file => &@as(*const File, @ptrCast(self)).name,
            else => &@as(*const File, @ptrCast(self)).name,
        };
        const name_ptr: [*:0]const u8 = @ptrCast(self_name);
        return std.mem.sliceTo(name_ptr, 0);
    }

    pub fn nameHash(self: *const Self) u64 {
        return std.hash.Wyhash.hash(0, self.name());
    }

    pub fn ext(self: *Self) ?*Ext {
        if (!self.pack.isext) return null;
        return @ptrCast(@as([*]Ext, @ptrCast(self)) - 1);
    }

    fn alloc(comptime T: type, etype: EType, isext: bool, ename: []const u8) *Entry {
    fn alloc(comptime T: type, allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
        const size = (if (isext) @as(usize, @sizeOf(Ext)) else 0) + @sizeOf(T) + ename.len + 1;
        var ptr = blk: while (true) {
            if (allocator.allocWithOptions(u8, size, 1, null)) |p| break :blk p
            const alignment = if (@typeInfo(@TypeOf(std.mem.Allocator.allocWithOptions)).@"fn".params[3].type == ?u29) 1 else std.mem.Alignment.@"1";
            if (allocator.allocWithOptions(u8, size, alignment, null)) |p| break :blk p
            else |_| {}
            ui.oom();
        };
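// (Illustrative sketch, not from the repo: alloc() above packs an optional
// Ext header, the node struct and the NUL-terminated name into a single heap
// block, and ext() walks backwards from the node to recover the header. The
// same trick in miniature, with hypothetical names and fields forced to
// align(1) as in the real structs:)
const Hdr = extern struct { mtime: u64 align(1) = 0 };
const HdrNode = extern struct {
    has_hdr: u8,

    fn hdr(self: *HdrNode) ?*Hdr {
        if (self.has_hdr == 0) return null;
        return @ptrCast(@as([*]Hdr, @ptrCast(self)) - 1);
    }
};

test "optional prefix header layout" {
    const raw = try std.testing.allocator.alloc(u8, @sizeOf(Hdr) + @sizeOf(HdrNode));
    defer std.testing.allocator.free(raw);
    const n: *HdrNode = @ptrCast(raw.ptr + @sizeOf(Hdr));
    n.has_hdr = 1;
    n.hdr().?.mtime = 42;
    try std.testing.expectEqual(@as(u64, 42), n.hdr().?.mtime);
}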

@@ -101,115 +126,65 @@
        return &e.entry;
    }

    pub fn create(etype: EType, isext: bool, ename: []const u8) *Entry {
    pub fn create(allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
        return switch (etype) {
            .dir => alloc(Dir, etype, isext, ename),
            .file => alloc(File, etype, isext, ename),
            .link => alloc(Link, etype, isext, ename),
            .dir => alloc(Dir, allocator, etype, isext, ename),
            .link => alloc(Link, allocator, etype, isext, ename),
            else => alloc(File, allocator, etype, isext, ename),
        };
    }

    pub fn destroy(self: *Self, allocator: std.mem.Allocator) void {
        const ptr: [*]u8 = if (self.ext()) |e| @ptrCast(e) else @ptrCast(self);
        const esize: usize = switch (self.pack.etype) {
            .dir => @sizeOf(Dir),
            .link => @sizeOf(Link),
            else => @sizeOf(File),
        };
        const size = (if (self.pack.isext) @as(usize, @sizeOf(Ext)) else 0) + esize + self.name().len + 1;
        allocator.free(ptr[0..size]);
    }

    fn hasErr(self: *Self) bool {
        return
            if (self.file()) |f| f.pack.err
            else if (self.dir()) |d| d.pack.err or d.pack.suberr
            else false;
            if(self.dir()) |d| d.pack.err or d.pack.suberr
            else self.pack.etype == .err;
    }

    pub fn addStats(self: *Entry, parent: *Dir, nlink: u31) void {
        if (self.pack.counted) return;
        self.pack.counted = true;

        // Add link to the inode map, but don't count its size (yet).
        if (self.link()) |l| {
            l.parent = parent;
            var d = inodes.map.getOrPut(l) catch unreachable;
            if (!d.found_existing) {
                d.value_ptr.* = .{ .counted = false, .nlink = nlink };
                inodes.total_blocks +|= self.pack.blocks;
                l.next = l;
            } else {
                inodes.setStats(.{ .key_ptr = d.key_ptr, .value_ptr = d.value_ptr }, false);
                // If the nlink counts are not consistent, reset to 0 so we calculate with what we have instead.
                if (d.value_ptr.nlink != nlink)
                    d.value_ptr.nlink = 0;
                l.next = d.key_ptr.*.next;
                d.key_ptr.*.next = l;
            }
            inodes.addUncounted(l);
        }

        var it: ?*Dir = parent;
        while(it) |p| : (it = p.parent) {
            if (self.ext()) |e|
                if (p.entry.ext()) |pe|
                    if (e.mtime > pe.mtime) { pe.mtime = e.mtime; };
            p.items +|= 1;
            if (self.pack.etype != .link) {
                p.entry.size +|= self.size;
                p.entry.pack.blocks +|= self.pack.blocks;
            }
        }
    }

    // Opposite of addStats(), but has some limitations:
    // - If addStats() saturated adding sizes, then the sizes after delStats()
    //   will be incorrect.
    // - mtime of parents is not adjusted (but that's a feature, possibly?)
    //
    // This function assumes that, for directories, all sub-entries have
    // already been un-counted.
    //
    // When removing a Link, the entry's nlink counter is reset to zero, so
    // that it will be recalculated based on our view of the tree. This means
    // that links outside of the scanned directory will not be considered
    // anymore, meaning that delStats() followed by addStats() with the same
    // data may cause information to be lost.
    pub fn delStats(self: *Entry, parent: *Dir) void {
        if (!self.pack.counted) return;
        defer self.pack.counted = false; // defer, to make sure inodes.setStats() still sees it as counted.

        if (self.link()) |l| {
            var d = inodes.map.getEntry(l).?;
            inodes.setStats(d, false);
            d.value_ptr.nlink = 0;
            if (l.next == l) {
                _ = inodes.map.remove(l);
                _ = inodes.uncounted.remove(l);
                inodes.total_blocks -|= self.pack.blocks;
            } else {
                if (d.key_ptr.* == l)
                    d.key_ptr.* = l.next;
                inodes.addUncounted(l.next);
                // This is O(n), which in this context has the potential to
                // slow ncdu down to a crawl. But this function is only called
                // on refresh/delete operations and even then it's not common
                // to have very long lists, so this blowing up should be very
                // rare. This removal can also be deferred to setStats() to
                // amortize the costs, if necessary.
                var it = l.next;
                while (it.next != l) it = it.next;
                it.next = l.next;
            }
        }

        var it: ?*Dir = parent;
        while(it) |p| : (it = p.parent) {
            p.items -|= 1;
            if (self.pack.etype != .link) {
                p.entry.size -|= self.size;
                p.entry.pack.blocks -|= self.pack.blocks;
            }
        }
    }

    pub fn delStatsRec(self: *Entry, parent: *Dir) void {
    fn removeLinks(self: *Entry) void {
        if (self.dir()) |d| {
            var it = d.sub;
            while (it) |e| : (it = e.next)
                e.delStatsRec(d);
            var it = d.sub.ptr;
            while (it) |e| : (it = e.next.ptr) e.removeLinks();
        }
        self.delStats(parent);
        if (self.link()) |l| l.removeLink();
    }

    fn zeroStatsRec(self: *Entry) void {
        self.pack.blocks = 0;
        self.size = 0;
        if (self.dir()) |d| {
            d.items = 0;
            d.pack.err = false;
            d.pack.suberr = false;
            var it = d.sub.ptr;
            while (it) |e| : (it = e.next.ptr) e.zeroStatsRec();
        }
    }

    // Recursively set stats and those of sub-items to zero and removes counts
    // from parent directories; as if this item does not exist in the tree.
    // XXX: Does not update the 'suberr' flag of parent directories, make sure
    // to call updateSubErr() afterwards.
    pub fn zeroStats(self: *Entry, parent: ?*Dir) void {
        self.removeLinks();

        var it = parent;
        while (it) |p| : (it = p.parent) {
            p.entry.pack.blocks -|= self.pack.blocks;
            p.entry.size -|= self.size;
            p.items -|= 1 + (if (self.dir()) |d| d.items else 0);
        }
        self.zeroStatsRec();
    }
};
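// (Illustrative sketch, not from the repo: the bookkeeping above relies on
// Zig's saturating operators (+|=, -|=), which is also the source of the
// delStats() caveat documented there: once an addition clamps at the integer
// maximum, the matching subtraction no longer restores the original value.)
test "saturated add cannot be undone" {
    var total: u8 = 200;
    total +|= 100; // clamps at 255; the true sum (300) is lost
    total -|= 100; // leaves 155 rather than the original 200
    try std.testing.expectEqual(@as(u8, 155), total);
}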

@@ -218,7 +193,7 @@ const DevId = u30; // Can be reduced to make room for more flags in Dir.Packed.
pub const Dir = extern struct {
    entry: Entry,

    sub: ?*Entry align(1) = null,
    sub: Ref = .{ .ptr = null },
    parent: ?*Dir align(1) = null,

    // entry.{blocks,size}: Total size of all unique files + dirs. Non-shared hardlinks are counted only once.

@@ -243,19 +218,20 @@
        suberr: bool = false,
    };

    pub fn fmtPath(self: *const @This(), withRoot: bool, out: *std.ArrayList(u8)) void {
    pub fn fmtPath(self: *const @This(), alloc: std.mem.Allocator, withRoot: bool, out: *std.ArrayListUnmanaged(u8)) void {
        if (!withRoot and self.parent == null) return;
        var components = std.ArrayList([:0]const u8).init(main.allocator);
        defer components.deinit();
        var components: std.ArrayListUnmanaged([:0]const u8) = .empty;
        defer components.deinit(main.allocator);
        var it: ?*const @This() = self;
        while (it) |e| : (it = e.parent)
            if (withRoot or e.parent != null)
                components.append(e.entry.name()) catch unreachable;
                components.append(main.allocator, e.entry.name()) catch unreachable;

        var i: usize = components.items.len-1;
        while (true) {
            if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/')) out.append('/') catch unreachable;
            out.appendSlice(components.items[i]) catch unreachable;
            if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/'))
                out.append(main.allocator, '/') catch unreachable;
            out.appendSlice(alloc, components.items[i]) catch unreachable;
            if (i == 0) break;
            i -= 1;
        }

@@ -265,8 +241,8 @@
    // been updated and does not propagate to parents.
    pub fn updateSubErr(self: *@This()) void {
        self.pack.suberr = false;
        var sub = self.sub;
        while (sub) |e| : (sub = e.next) {
        var sub = self.sub.ptr;
        while (sub) |e| : (sub = e.next.ptr) {
            if (e.hasErr()) {
                self.pack.suberr = true;
                break;

@@ -279,58 +255,115 @@
pub const Link = extern struct {
    entry: Entry,
    parent: *Dir align(1) = undefined,
    next: *Link align(1) = undefined, // Singly circular linked list of all *Link nodes with the same dev,ino.
    next: *Link align(1) = undefined, // circular linked list of all *Link nodes with the same dev,ino.
    prev: *Link align(1) = undefined,
    // dev is inherited from the parent Dir
    ino: u64 align(1) = undefined,
    pack: Pack align(1) = .{},
    name: [0]u8 = undefined,

    const Pack = packed struct(u32) {
        // Whether this Inode is counted towards the parent directories.
        // Is kept synchronized between all Link nodes with the same dev/ino.
        counted: bool = false,
        // Number of links for this inode. When set to '0', we don't know the
        // actual nlink count; which happens for old JSON dumps.
        nlink: u31 = undefined,
    };

    // Return value should be freed with main.allocator.
    pub fn path(self: *const @This(), withRoot: bool) [:0]const u8 {
        var out = std.ArrayList(u8).init(main.allocator);
        self.parent.fmtPath(withRoot, &out);
        out.append('/') catch unreachable;
        out.appendSlice(self.entry.name()) catch unreachable;
        return out.toOwnedSliceSentinel(0) catch unreachable;
        var out: std.ArrayListUnmanaged(u8) = .empty;
        self.parent.fmtPath(main.allocator, withRoot, &out);
        out.append(main.allocator, '/') catch unreachable;
        out.appendSlice(main.allocator, self.entry.name()) catch unreachable;
        return out.toOwnedSliceSentinel(main.allocator, 0) catch unreachable;
    }

    // Add this link to the inodes map and mark it as 'uncounted'.
    pub fn addLink(l: *@This()) void {
        const d = inodes.map.getOrPut(l) catch unreachable;
        if (!d.found_existing) {
            l.next = l;
            l.prev = l;
        } else {
            inodes.setStats(d.key_ptr.*, false);
            l.next = d.key_ptr.*;
            l.prev = d.key_ptr.*.prev;
            l.next.prev = l;
            l.prev.next = l;
        }
        inodes.addUncounted(l);
    }

    // Remove this link from the inodes map and remove its stats from parent directories.
    fn removeLink(l: *@This()) void {
        inodes.setStats(l, false);
        const entry = inodes.map.getEntry(l) orelse return;
        if (l.next == l) {
            _ = inodes.map.remove(l);
            _ = inodes.uncounted.remove(l);
        } else {
            // XXX: If this link is actually removed from the filesystem, then
            // the nlink count of the existing links should be updated to
            // reflect that. But we can't do that here, because this function
            // is also called before doing a filesystem refresh - in which case
            // the nlink count likely won't change. Best we can hope for is
            // that a refresh will encounter another link to the same inode and
            // trigger an nlink change.
            if (entry.key_ptr.* == l)
                entry.key_ptr.* = l.next;
            inodes.addUncounted(l.next);
            l.next.prev = l.prev;
            l.prev.next = l.next;
        }
    }
};
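// (Illustrative sketch, not from the repo: the new `prev` pointer is what
// turns removeLink() into constant-time pointer surgery; the old
// singly-linked cycle, still visible in delStats() above, had to walk the
// whole ring to find the predecessor. The same idea on a toy node type:)
const RingNode = struct { next: *RingNode = undefined, prev: *RingNode = undefined };

fn ringInsert(head: *RingNode, n: *RingNode) void {
    n.next = head;
    n.prev = head.prev;
    n.next.prev = n;
    n.prev.next = n;
}

fn ringRemove(n: *RingNode) void { // O(1), no walk needed
    n.next.prev = n.prev;
    n.prev.next = n.next;
}

test "circular doubly-linked ring" {
    var a = RingNode{};
    var b = RingNode{};
    a.next = &a;
    a.prev = &a;
    ringInsert(&a, &b);
    try std.testing.expect(a.next == &b and b.next == &a);
    ringRemove(&b);
    try std.testing.expect(a.next == &a and a.prev == &a);
}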

// Anything that's not an (indexed) directory or hardlink. Excluded directories are also "Files".
pub const File = extern struct {
    entry: Entry,
    pack: Packed = .{},
    name: [0]u8 = undefined,

    pub const Packed = packed struct(u8) {
        err: bool = false,
        excluded: bool = false,
        other_fs: bool = false,
        kernfs: bool = false,
        notreg: bool = false,
        _pad: u3 = 0, // Make this struct "ABI sized" to allow inclusion in an extern struct
    };
};

pub const Ext = extern struct {
    pack: Pack = .{},
    mtime: u64 align(1) = 0,
    uid: u32 align(1) = 0,
    gid: u32 align(1) = 0,
    mode: u16 align(1) = 0,

    pub const Pack = packed struct(u8) {
        hasmtime: bool = false,
        hasuid: bool = false,
        hasgid: bool = false,
        hasmode: bool = false,
        _pad: u4 = 0,
    };

    pub fn isEmpty(e: *const Ext) bool {
        return !e.pack.hasmtime and !e.pack.hasuid and !e.pack.hasgid and !e.pack.hasmode;
    }
};


// List of st_dev entries. Those are typically 64bits, but that's quite a waste
// of space when a typical scan won't cover many unique devices.
pub const devices = struct {
    var lock = std.Thread.Mutex{};
    // id -> dev
    pub var list = std.ArrayList(u64).init(main.allocator);
    pub var list: std.ArrayListUnmanaged(u64) = .empty;
    // dev -> id
    var lookup = std.AutoHashMap(u64, DevId).init(main.allocator);

    pub fn getId(dev: u64) DevId {
        lock.lock();
        defer lock.unlock();
        const d = lookup.getOrPut(dev) catch unreachable;
        if (!d.found_existing) {
            if (list.items.len >= std.math.maxInt(DevId)) ui.die("Maximum number of device identifiers exceeded.\n", .{});
            d.value_ptr.* = @as(DevId, @intCast(list.items.len));
            list.append(dev) catch unreachable;
            list.append(main.allocator, dev) catch unreachable;
        }
        return d.value_ptr.*;
    }
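// (Illustrative sketch, not from the repo: getId() above is a small interning
// scheme, mapping sparse 64-bit st_dev values to dense ids with the array
// providing the reverse lookup. The same pattern in miniature, with
// hypothetical names:)
test "device id interning" {
    var ids: std.ArrayListUnmanaged(u64) = .empty;
    defer ids.deinit(std.testing.allocator);
    var rev = std.AutoHashMap(u64, u30).init(std.testing.allocator);
    defer rev.deinit();

    const dev: u64 = 0x10302; // an arbitrary st_dev value
    const slot = try rev.getOrPut(dev);
    if (!slot.found_existing) {
        slot.value_ptr.* = @intCast(ids.items.len);
        try ids.append(std.testing.allocator, dev);
    }
    try std.testing.expectEqual(dev, ids.items[slot.value_ptr.*]);
}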

@@ -343,14 +376,9 @@ pub const inodes = struct {
    // node in the list. Link entries with the same dev/ino are part of a
    // circular linked list, so you can iterate through all of them with this
    // single pointer.
    const Map = std.HashMap(*Link, Inode, HashContext, 80);
    const Map = std.HashMap(*Link, void, HashContext, 80);
    pub var map = Map.init(main.allocator);

    // Cumulative size of all unique hard links in the map. This is a somewhat
    // ugly workaround to provide accurate sizes during the initial scan, when
    // the hard links are not counted as part of the parent directories yet.
    pub var total_blocks: Blocks = 0;

    // List of nodes in 'map' with !counted, to speed up addAllStats().
    // If this list grows large relative to the number of nodes in 'map', then
    // this list is cleared and uncounted_full is set instead, so that

@@ -358,16 +386,7 @@
    var uncounted = std.HashMap(*Link, void, HashContext, 80).init(main.allocator);
    var uncounted_full = true; // start with true for the initial scan

    const Inode = packed struct {
        // Whether this Inode is counted towards the parent directories.
        counted: bool,
        // Number of links for this inode. When set to '0', we don't know the
        // actual nlink count, either because it wasn't part of the imported
        // JSON data or because we read inconsistent values from the
        // filesystem. The count will then be updated by the actual number of
        // links in our in-memory tree.
        nlink: u31,
    };
    pub var lock = std.Thread.Mutex{};

    const HashContext = struct {
        pub fn hash(_: @This(), l: *Link) u64 {

@@ -395,61 +414,85 @@
    // the list of *Links and their sizes and counts must be in the exact same
    // state as when the stats were added. Hence, any modification to the Link
    // state should be preceded by a setStats(.., false).
    fn setStats(entry: Map.Entry, add: bool) void {
        if (entry.value_ptr.counted == add) return;
        entry.value_ptr.counted = add;
    fn setStats(l: *Link, add: bool) void {
        if (l.pack.counted == add) return;

        var nlink: u31 = 0;
        var inconsistent = false;
        var dirs = std.AutoHashMap(*Dir, u32).init(main.allocator);
        defer dirs.deinit();
        var it = entry.key_ptr.*;
        var it = l;
        while (true) {
            if (it.entry.pack.counted) {
                it.pack.counted = add;
                nlink += 1;
                if (it.pack.nlink != l.pack.nlink) inconsistent = true;
                var parent: ?*Dir = it.parent;
                while (parent) |p| : (parent = p.parent) {
                    const de = dirs.getOrPut(p) catch unreachable;
                    if (de.found_existing) de.value_ptr.* += 1
                    else de.value_ptr.* = 1;
                }
            }
            it = it.next;
            if (it == entry.key_ptr.*)
            if (it == l)
                break;
        }

        if (entry.value_ptr.nlink < nlink) entry.value_ptr.nlink = nlink
        else nlink = entry.value_ptr.nlink;
        // There's not many sensible things we can do when we encounter
        // inconsistent nlink counts. Current approach is to use the number of
        // times we've seen this link in our tree as fallback for when the
        // nlink counts aren't matching. May want to add a warning of some
        // sorts to the UI at some point.
        if (!inconsistent and l.pack.nlink >= nlink) nlink = l.pack.nlink;

        // XXX: We're also not testing for inconsistent entry sizes, instead
        // using the given 'l' size for all Links. Might warrant a warning as
        // well.

        var dir_iter = dirs.iterator();
        if (add) {
            while (dir_iter.next()) |de| {
                de.key_ptr.*.entry.pack.blocks +|= entry.key_ptr.*.entry.pack.blocks;
                de.key_ptr.*.entry.size +|= entry.key_ptr.*.entry.size;
                de.key_ptr.*.entry.pack.blocks +|= l.entry.pack.blocks;
                de.key_ptr.*.entry.size +|= l.entry.size;
                if (de.value_ptr.* < nlink) {
                    de.key_ptr.*.shared_blocks +|= entry.key_ptr.*.entry.pack.blocks;
                    de.key_ptr.*.shared_size +|= entry.key_ptr.*.entry.size;
                    de.key_ptr.*.shared_blocks +|= l.entry.pack.blocks;
                    de.key_ptr.*.shared_size +|= l.entry.size;
                }
            }
        } else {
            while (dir_iter.next()) |de| {
                de.key_ptr.*.entry.pack.blocks -|= entry.key_ptr.*.entry.pack.blocks;
                de.key_ptr.*.entry.size -|= entry.key_ptr.*.entry.size;
                de.key_ptr.*.entry.pack.blocks -|= l.entry.pack.blocks;
                de.key_ptr.*.entry.size -|= l.entry.size;
                if (de.value_ptr.* < nlink) {
                    de.key_ptr.*.shared_blocks -|= entry.key_ptr.*.entry.pack.blocks;
                    de.key_ptr.*.shared_size -|= entry.key_ptr.*.entry.size;
                    de.key_ptr.*.shared_blocks -|= l.entry.pack.blocks;
                    de.key_ptr.*.shared_size -|= l.entry.size;
                }
            }
        }
    }

    // counters to track progress for addAllStats()
    pub var add_total: usize = 0;
    pub var add_done: usize = 0;

    pub fn addAllStats() void {
        if (uncounted_full) {
            var it = map.iterator();
            while (it.next()) |e| setStats(e, true);
            add_total = map.count();
            add_done = 0;
            var it = map.keyIterator();
            while (it.next()) |e| {
                setStats(e.*, true);
                add_done += 1;
                if ((add_done & 65) == 0) main.handleEvent(false, false);
            }
        } else {
            var it = uncounted.iterator();
            while (it.next()) |u| if (map.getEntry(u.key_ptr.*)) |e| setStats(e, true);
            add_total = uncounted.count();
            add_done = 0;
            var it = uncounted.keyIterator();
            while (it.next()) |u| {
                if (map.getKey(u.*)) |e| setStats(e, true);
                add_done += 1;
                if ((add_done & 65) == 0) main.handleEvent(false, false);
            }
        }
        uncounted_full = false;
        if (uncounted.count() > 0)

@@ -462,8 +505,9 @@ pub var root: *Dir = undefined;

test "entry" {
    var e = Entry.create(.file, false, "hello");
    try std.testing.expectEqual(e.pack.etype, .file);
    var e = Entry.create(std.testing.allocator, .reg, false, "hello");
    defer e.destroy(std.testing.allocator);
    try std.testing.expectEqual(e.pack.etype, .reg);
    try std.testing.expect(!e.pack.isext);
    try std.testing.expectEqualStrings(e.name(), "hello");
}

1267 src/scan.zig
File diff suppressed because it is too large

498 src/sink.zig (new file)

@@ -0,0 +1,498 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const mem_src = @import("mem_src.zig");
const mem_sink = @import("mem_sink.zig");
const json_export = @import("json_export.zig");
const bin_export = @import("bin_export.zig");
const ui = @import("ui.zig");
const util = @import("util.zig");

// Terminology note:
// "source" is where scan results come from, these are scan.zig, mem_src.zig
// and json_import.zig.
// "sink" is where scan results go to. This file provides a generic sink API
// for sources to use. The API forwards the results to specific sink
// implementations (mem_sink.zig or json_export.zig) and provides progress
// updates.

// API for sources:
//
// Single-threaded:
//
// createThreads(1)
// dir = createRoot(name, stat)
// dir.addSpecial(name, opt)
// dir.addFile(name, stat)
// sub = dir.addDir(name, stat)
//   (no dir.stuff here)
//   sub.addstuff();
//   sub.unref();
// dir.unref();
// done()
//
// Multi-threaded interleaving:
//
// createThreads(n)
// dir = createRoot(name, stat)
// dir.addSpecial(name, opt)
// dir.addFile(name, stat)
// sub = dir.addDir(...)
//   sub.addstuff();
// sub2 = dir.addDir(..);
//   sub.unref();
// dir.unref(); // <- no more direct descendants for x, but subdirs could still be active
//   sub2.addStuff();
//   sub2.unref(); // <- this is where 'dir' is really done.
// done()
//
// Rule:
// No concurrent method calls on a single Dir object, but objects may be passed between threads.
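// (Illustrative sketch, not from the repo: a minimal hypothetical
// single-threaded source following the sequence above. The stat values are
// made up; "addFile"/"addstuff" in the comment correspond to the addStat()
// and addSpecial() calls defined below.)
fn exampleSource() void {
    const threads = createThreads(1);
    const t = &threads[0];

    var dstat = Stat{ .etype = .dir };
    const root = createRoot("/tmp/example", &dstat);

    var fstat = Stat{ .etype = .reg, .size = 512, .blocks = 1 };
    root.addStat(t, "hello.txt", &fstat);

    const sub = root.addDir(t, "subdir", &dstat);
    sub.addSpecial(t, "unreadable", .err);
    sub.unref(t);

    root.unref(t);
    done();
}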

// Concise stat struct for fields we're interested in, with the types used by the model.
pub const Stat = struct {
    etype: model.EType = .reg,
    blocks: model.Blocks = 0,
    size: u64 = 0,
    dev: u64 = 0,
    ino: u64 = 0,
    nlink: u31 = 0,
    ext: model.Ext = .{},
};


pub const Dir = struct {
    refcnt: std.atomic.Value(usize) = std.atomic.Value(usize).init(1),
    name: []const u8,
    parent: ?*Dir,
    out: Out,

    const Out = union(enum) {
        mem: mem_sink.Dir,
        json: json_export.Dir,
        bin: bin_export.Dir,
    };

    pub fn addSpecial(d: *Dir, t: *Thread, name: []const u8, sp: model.EType) void {
        std.debug.assert(@intFromEnum(sp) < 0); // >=0 aren't "special"
        _ = t.files_seen.fetchAdd(1, .monotonic);
        switch (d.out) {
            .mem => |*m| m.addSpecial(&t.sink.mem, name, sp),
            .json => |*j| j.addSpecial(name, sp),
            .bin => |*b| b.addSpecial(&t.sink.bin, name, sp),
        }
        if (sp == .err) {
            global.last_error_lock.lock();
            defer global.last_error_lock.unlock();
            if (global.last_error) |p| main.allocator.free(p);
            const p = d.path();
            global.last_error = std.fs.path.joinZ(main.allocator, &.{ p, name }) catch unreachable;
            main.allocator.free(p);
        }
    }

    pub fn addStat(d: *Dir, t: *Thread, name: []const u8, stat: *const Stat) void {
        _ = t.files_seen.fetchAdd(1, .monotonic);
        _ = t.addBytes((stat.blocks *| 512) / @max(1, stat.nlink));
        std.debug.assert(stat.etype != .dir);
        switch (d.out) {
            .mem => |*m| _ = m.addStat(&t.sink.mem, name, stat),
            .json => |*j| j.addStat(name, stat),
            .bin => |*b| b.addStat(&t.sink.bin, name, stat),
        }
    }

    pub fn addDir(d: *Dir, t: *Thread, name: []const u8, stat: *const Stat) *Dir {
        _ = t.files_seen.fetchAdd(1, .monotonic);
        _ = t.addBytes(stat.blocks *| 512);
        std.debug.assert(stat.etype == .dir);
        std.debug.assert(d.out != .json or d.refcnt.load(.monotonic) == 1);

        const s = main.allocator.create(Dir) catch unreachable;
        s.* = .{
            .name = main.allocator.dupe(u8, name) catch unreachable,
            .parent = d,
            .out = switch (d.out) {
                .mem => |*m| .{ .mem = m.addDir(&t.sink.mem, name, stat) },
                .json => |*j| .{ .json = j.addDir(name, stat) },
                .bin => |*b| .{ .bin = b.addDir(stat) },
            },
        };
        d.ref();
        return s;
    }

    pub fn setReadError(d: *Dir, t: *Thread) void {
        _ = t;
        switch (d.out) {
            .mem => |*m| m.setReadError(),
            .json => |*j| j.setReadError(),
            .bin => |*b| b.setReadError(),
        }
        global.last_error_lock.lock();
        defer global.last_error_lock.unlock();
        if (global.last_error) |p| main.allocator.free(p);
        global.last_error = d.path();
    }

    fn path(d: *Dir) [:0]u8 {
        var components: std.ArrayListUnmanaged([]const u8) = .empty;
        defer components.deinit(main.allocator);
        var it: ?*Dir = d;
        while (it) |e| : (it = e.parent) components.append(main.allocator, e.name) catch unreachable;

        var out: std.ArrayListUnmanaged(u8) = .empty;
        var i: usize = components.items.len-1;
        while (true) {
            if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/'))
                out.append(main.allocator, '/') catch unreachable;
            out.appendSlice(main.allocator, components.items[i]) catch unreachable;
            if (i == 0) break;
            i -= 1;
        }
        return out.toOwnedSliceSentinel(main.allocator, 0) catch unreachable;
    }

    fn ref(d: *Dir) void {
        _ = d.refcnt.fetchAdd(1, .monotonic);
    }

    pub fn unref(d: *Dir, t: *Thread) void {
        if (d.refcnt.fetchSub(1, .release) != 1) return;
        _ = d.refcnt.load(.acquire);

        switch (d.out) {
            .mem => |*m| m.final(if (d.parent) |p| &p.out.mem else null),
            .json => |*j| j.final(),
            .bin => |*b| b.final(&t.sink.bin, d.name, if (d.parent) |p| &p.out.bin else null),
        }

        if (d.parent) |p| p.unref(t);
        if (d.name.len > 0) main.allocator.free(d.name);
        main.allocator.destroy(d);
    }
};


pub const Thread = struct {
    current_dir: ?*Dir = null,
    lock: std.Thread.Mutex = .{},
    // On 32-bit architectures, bytes_seen is protected by the above mutex instead.
    bytes_seen: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
    files_seen: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),

    sink: union {
        mem: mem_sink.Thread,
        json: void,
        bin: bin_export.Thread,
    } = .{.mem = .{}},

    fn addBytes(t: *Thread, bytes: u64) void {
        if (@bitSizeOf(usize) >= 64) _ = t.bytes_seen.fetchAdd(bytes, .monotonic)
        else {
            t.lock.lock();
            defer t.lock.unlock();
            t.bytes_seen.raw += bytes;
        }
    }

    fn getBytes(t: *Thread) u64 {
        if (@bitSizeOf(usize) >= 64) return t.bytes_seen.load(.monotonic)
        else {
            t.lock.lock();
            defer t.lock.unlock();
            return t.bytes_seen.raw;
        }
    }

    pub fn setDir(t: *Thread, d: ?*Dir) void {
        t.lock.lock();
        defer t.lock.unlock();
        t.current_dir = d;
    }
};

pub const global = struct {
    pub var state: enum { done, err, zeroing, hlcnt, running } = .running;
    pub var threads: []Thread = undefined;
    pub var sink: enum { json, mem, bin } = .mem;

    pub var last_error: ?[:0]u8 = null;
    var last_error_lock = std.Thread.Mutex{};
    var need_confirm_quit = false;
};


// Must be the first thing to call from a source; initializes global state.
pub fn createThreads(num: usize) []Thread {
    // JSON export does not support multiple threads, scan into memory first.
    if (global.sink == .json and num > 1) {
        global.sink = .mem;
        mem_sink.global.stats = false;
    }

    global.state = .running;
    if (global.last_error) |p| main.allocator.free(p);
    global.last_error = null;
    global.threads = main.allocator.alloc(Thread, num) catch unreachable;
    for (global.threads) |*t| t.* = .{
        .sink = switch (global.sink) {
            .mem => .{ .mem = .{} },
            .json => .{ .json = {} },
            .bin => .{ .bin = .{} },
        },
    };
    return global.threads;
}


// Must be the last thing to call from a source.
pub fn done() void {
    switch (global.sink) {
        .mem => mem_sink.done(),
        .json => json_export.done(),
        .bin => bin_export.done(global.threads),
    }
    global.state = .done;
    main.allocator.free(global.threads);

    // We scanned into memory, now we need to scan from memory to JSON
    if (global.sink == .mem and !mem_sink.global.stats) {
        global.sink = .json;
        mem_src.run(model.root);
    }

    // Clear the screen when done.
    if (main.config.scan_ui == .line) main.handleEvent(false, true);
}


pub fn createRoot(path: []const u8, stat: *const Stat) *Dir {
    const d = main.allocator.create(Dir) catch unreachable;
    d.* = .{
        .name = main.allocator.dupe(u8, path) catch unreachable,
        .parent = null,
        .out = switch (global.sink) {
            .mem => .{ .mem = mem_sink.createRoot(path, stat) },
            .json => .{ .json = json_export.createRoot(path, stat) },
            .bin => .{ .bin = bin_export.createRoot(stat, global.threads) },
        },
    };
    return d;
}


fn drawConsole() void {
    const st = struct {
        var ansi: ?bool = null;
        var lines_written: usize = 0;
    };
    const stderr = if (@hasDecl(std.io, "getStdErr")) std.io.getStdErr() else std.fs.File.stderr();
    const ansi = st.ansi orelse blk: {
        const t = stderr.supportsAnsiEscapeCodes();
        st.ansi = t;
        break :blk t;
    };

    var buf: [4096]u8 = undefined;
    var strm = std.io.fixedBufferStream(buf[0..]);
    var wr = strm.writer();
    while (ansi and st.lines_written > 0) {
        wr.writeAll("\x1b[1F\x1b[2K") catch {};
        st.lines_written -= 1;
    }

    if (global.state == .hlcnt) {
        wr.writeAll("Counting hardlinks...") catch {};
        if (model.inodes.add_total > 0)
            wr.print(" {} / {}", .{ model.inodes.add_done, model.inodes.add_total }) catch {};
        wr.writeByte('\n') catch {};
        st.lines_written += 1;

    } else if (global.state == .running) {
        var bytes: u64 = 0;
        var files: u64 = 0;
        for (global.threads) |*t| {
            bytes +|= t.getBytes();
            files += t.files_seen.load(.monotonic);
        }
        const r = ui.FmtSize.fmt(bytes);
        wr.print("{} files / {s}{s}\n", .{files, r.num(), r.unit}) catch {};
        st.lines_written += 1;

        for (global.threads, 0..) |*t, i| {
            const dir = blk: {
                t.lock.lock();
                defer t.lock.unlock();
                break :blk if (t.current_dir) |d| d.path() else null;
            };
            wr.print(" #{}: {s}\n", .{i+1, ui.shorten(ui.toUtf8(dir orelse "(waiting)"), 73)}) catch {};
            st.lines_written += 1;
            if (dir) |p| main.allocator.free(p);
        }
    }

    stderr.writeAll(strm.getWritten()) catch {};
}


fn drawProgress() void {
    const st = struct { var animation_pos: usize = 0; };

    var bytes: u64 = 0;
    var files: u64 = 0;
    for (global.threads) |*t| {
        bytes +|= t.getBytes();
        files += t.files_seen.load(.monotonic);
    }

    ui.init();
    const width = ui.cols -| 5;
    const numthreads: u32 = @intCast(@min(global.threads.len, @max(1, ui.rows -| 10)));
    const box = ui.Box.create(8 + numthreads, width, "Scanning...");
    box.move(2, 2);
    ui.addstr("Total items: ");
    ui.addnum(.default, files);

    if (width > 48) {
        box.move(2, 30);
        ui.addstr("size: ");
        ui.addsize(.default, bytes);
    }

    for (0..numthreads) |i| {
        box.move(3+@as(u32, @intCast(i)), 4);
        const dir = blk: {
            const t = &global.threads[i];
            t.lock.lock();
            defer t.lock.unlock();
            break :blk if (t.current_dir) |d| d.path() else null;
        };
        ui.addstr(ui.shorten(ui.toUtf8(dir orelse "(waiting)"), width -| 6));
        if (dir) |p| main.allocator.free(p);
    }

    blk: {
        global.last_error_lock.lock();
        defer global.last_error_lock.unlock();
        const err = global.last_error orelse break :blk;
        box.move(4 + numthreads, 2);
        ui.style(.bold);
        ui.addstr("Warning: ");
        ui.style(.default);
        ui.addstr("error scanning ");
        ui.addstr(ui.shorten(ui.toUtf8(err), width -| 28));
        box.move(5 + numthreads, 3);
        ui.addstr("some directory sizes may not be correct.");
    }

    if (global.need_confirm_quit) {
        box.move(6 + numthreads, width -| 20);
        ui.addstr("Press ");
        ui.style(.key);
        ui.addch('y');
        ui.style(.default);
        ui.addstr(" to confirm");
    } else {
        box.move(6 + numthreads, width -| 18);
        ui.addstr("Press ");
        ui.style(.key);
        ui.addch('q');
        ui.style(.default);
        ui.addstr(" to abort");
    }

    if (main.config.update_delay < std.time.ns_per_s and width > 40) {
        const txt = "Scanning...";
        st.animation_pos += 1;
        if (st.animation_pos >= txt.len*2) st.animation_pos = 0;
        if (st.animation_pos < txt.len) {
            box.move(6 + numthreads, 2);
            for (txt[0..st.animation_pos + 1]) |t| ui.addch(t);
        } else {
            var i: u32 = txt.len-1;
            while (i > st.animation_pos-txt.len) : (i -= 1) {
                box.move(6 + numthreads, 2+i);
                ui.addch(txt[i]);
            }
        }
    }
}


fn drawError() void {
    const width = ui.cols -| 5;
    const box = ui.Box.create(6, width, "Scan error");

    box.move(2, 2);
    ui.addstr("Unable to open directory:");
    box.move(3, 4);
    ui.addstr(ui.shorten(ui.toUtf8(global.last_error.?), width -| 10));

    box.move(4, width -| 27);
    ui.addstr("Press any key to continue");
}


fn drawMessage(msg: []const u8) void {
    const width = ui.cols -| 5;
    const box = ui.Box.create(4, width, "Scan error");
    box.move(2, 2);
    ui.addstr(msg);
}


pub fn draw() void {
    switch (main.config.scan_ui.?) {
        .none => {},
        .line => drawConsole(),
        .full => {
            ui.init();
            switch (global.state) {
                .done => {},
                .err => drawError(),
                .zeroing => {
                    const box = ui.Box.create(4, ui.cols -| 5, "Initializing");
                    box.move(2, 2);
                    ui.addstr("Clearing directory counts...");
                },
                .hlcnt => {
                    const box = ui.Box.create(4, ui.cols -| 5, "Finalizing");
                    box.move(2, 2);
                    ui.addstr("Counting hardlinks... ");
                    if (model.inodes.add_total > 0) {
                        ui.addnum(.default, model.inodes.add_done);
                        ui.addstr(" / ");
                        ui.addnum(.default, model.inodes.add_total);
                    }
                },
                .running => drawProgress(),
            }
        },
    }
}


pub fn keyInput(ch: i32) void {
    switch (global.state) {
        .done => {},
        .err => main.state = .browse,
        .zeroing => {},
        .hlcnt => {},
        .running => {
            switch (ch) {
                'q' => {
                    if (main.config.confirm_quit) global.need_confirm_quit = !global.need_confirm_quit
                    else ui.quit();
                },
                'y', 'Y' => if (global.need_confirm_quit) ui.quit(),
                else => global.need_confirm_quit = false,
            }
        },
    }
}

202 src/ui.zig

@@ -6,26 +6,18 @@
const std = @import("std");
const main = @import("main.zig");
const util = @import("util.zig");

pub const c = @cImport({
    @cDefine("_XOPEN_SOURCE", "1");
    @cInclude("stdio.h");
    @cInclude("string.h");
    @cInclude("curses.h");
    @cInclude("time.h");
    @cInclude("wchar.h");
    @cInclude("locale.h");
});
const c = @import("c.zig").c;

pub var inited: bool = false;
pub var main_thread: std.Thread.Id = undefined;
pub var oom_threads = std.atomic.Value(usize).init(0);

pub var rows: u32 = undefined;
pub var cols: u32 = undefined;

pub fn die(comptime fmt: []const u8, args: anytype) noreturn {
    deinit();
    const stderr = std.io.getStdErr();
    stderr.writer().print(fmt, args) catch {};
    std.debug.print(fmt, args);
    std.process.exit(1);
}

@@ -34,6 +26,8 @@ pub fn quit() noreturn {
    std.process.exit(0);
}

const sleep = if (@hasDecl(std.time, "sleep")) std.time.sleep else std.Thread.sleep;

// Should be called when malloc fails. Will show a message to the user, wait
// for a second and return to give it another try.
// Glitch: this function may be called while we're in the process of drawing

@@ -44,13 +38,19 @@ pub fn quit() noreturn {
// no clue if ncurses will consistently report OOM, but we're not handling that
// right now.
pub fn oom() void {
    @branchHint(.cold);
    if (main_thread == std.Thread.getCurrentId()) {
        const haveui = inited;
        deinit();
        const stderr = std.io.getStdErr();
        stderr.writeAll("\x1b7\x1b[JOut of memory, trying again in 1 second. Hit Ctrl-C to abort.\x1b8") catch {};
        std.time.sleep(std.time.ns_per_s);
        std.debug.print("\x1b7\x1b[JOut of memory, trying again in 1 second. Hit Ctrl-C to abort.\x1b8", .{});
        sleep(std.time.ns_per_s);
        if (haveui)
            init();
    } else {
        _ = oom_threads.fetchAdd(1, .monotonic);
        sleep(std.time.ns_per_s);
        _ = oom_threads.fetchSub(1, .monotonic);
    }
}

// Dumb strerror() alternative for Zig file I/O, not complete.

@@ -75,11 +75,12 @@ pub fn errorString(e: anyerror) [:0]const u8 {
        error.ReadOnlyFilesystem => "Read-only filesystem",
        error.SymlinkLoop => "Symlink loop",
        error.SystemFdQuotaExceeded => "System file descriptor limit exceeded",
        error.EndOfStream => "Unexpected end of file",
        else => @errorName(e),
    };
}

var to_utf8_buf = std.ArrayList(u8).init(main.allocator);
var to_utf8_buf: std.ArrayListUnmanaged(u8) = .empty;

fn toUtf8BadChar(ch: u8) bool {
    return switch (ch) {

@@ -106,19 +107,19 @@ pub fn toUtf8(in: [:0]const u8) [:0]const u8 {
        if (std.unicode.utf8ByteSequenceLength(in[i])) |cp_len| {
            if (!toUtf8BadChar(in[i]) and i + cp_len <= in.len) {
                if (std.unicode.utf8Decode(in[i .. i + cp_len])) |_| {
                    to_utf8_buf.appendSlice(in[i .. i + cp_len]) catch unreachable;
                    to_utf8_buf.appendSlice(main.allocator, in[i .. i + cp_len]) catch unreachable;
                    i += cp_len;
                    continue;
                } else |_| {}
            }
        } else |_| {}
        to_utf8_buf.writer().print("\\x{X:0>2}", .{in[i]}) catch unreachable;
        to_utf8_buf.writer(main.allocator).print("\\x{X:0>2}", .{in[i]}) catch unreachable;
        i += 1;
    }
    return util.arrayListBufZ(&to_utf8_buf);
    return util.arrayListBufZ(&to_utf8_buf, main.allocator);
}

var shorten_buf = std.ArrayList(u8).init(main.allocator);
var shorten_buf: std.ArrayListUnmanaged(u8) = .empty;

// Shorten the given string to fit in the given number of columns.
// If the string is too long, only the prefix and suffix will be printed, with '...' in between.

@@ -149,8 +150,8 @@ pub fn shorten(in: [:0]const u8, max_width: u32) [:0] const u8 {
    if (total_width <= max_width) return in;

    shorten_buf.shrinkRetainingCapacity(0);
    shorten_buf.appendSlice(in[0..prefix_end]) catch unreachable;
    shorten_buf.appendSlice("...") catch unreachable;
    shorten_buf.appendSlice(main.allocator, in[0..prefix_end]) catch unreachable;
    shorten_buf.appendSlice(main.allocator, "...") catch unreachable;

    var start_width: u32 = prefix_width;
    var start_len: u32 = prefix_end;

@@ -162,11 +163,11 @@ pub fn shorten(in: [:0]const u8, max_width: u32) [:0] const u8 {
        start_width += cp_width;
        start_len += cp_len;
        if (total_width - start_width <= max_width - prefix_width - 3) {
            shorten_buf.appendSlice(in[start_len..]) catch unreachable;
            shorten_buf.appendSlice(main.allocator, in[start_len..]) catch unreachable;
            break;
        }
    }
    return util.arrayListBufZ(&shorten_buf);
    return util.arrayListBufZ(&shorten_buf, main.allocator);
}

fn shortenTest(in: [:0]const u8, max_width: u32, out: [:0]const u8) !void {

@@ -287,7 +288,7 @@ pub const Style = lbl: {
        };
    }
    break :lbl @Type(.{
        .Enum = .{
        .@"enum" = .{
            .tag_type = u8,
            .fields = &fields,
            .decls = &[_]std.builtin.Type.Declaration{},

@@ -334,8 +335,7 @@ fn updateSize() void {
fn clearScr() void {
    // Send a "clear from cursor to end of screen" instruction, to clear a
    // potential line left behind from scanning in -1 mode.
    const stderr = std.io.getStdErr();
    stderr.writeAll("\x1b[J") catch {};
    std.debug.print("\x1b[J", .{});
}

pub fn init() void {

@@ -405,39 +405,86 @@ pub fn addch(ch: c.chtype) void {
// unit = " XB" or " XiB"
// Concatenated, these take 8 columns in SI mode or 9 otherwise.
pub const FmtSize = struct {
    buf: [8:0]u8,
    buf: [5:0]u8,
    unit: [:0]const u8,

    pub fn fmt(v: u64) @This() {
        var r: @This() = undefined;
        var f: f32 = @floatFromInt(v);
        if (main.config.si) {
            if(f < 1000.0) { r.unit = "  B"; }
            else if(f < 1e6) { r.unit = " KB"; f /= 1e3; }
            else if(f < 1e9) { r.unit = " MB"; f /= 1e6; }
            else if(f < 1e12) { r.unit = " GB"; f /= 1e9; }
            else if(f < 1e15) { r.unit = " TB"; f /= 1e12; }
            else if(f < 1e18) { r.unit = " PB"; f /= 1e15; }
            else { r.unit = " EB"; f /= 1e18; }
        }
        else {
            if(f < 1000.0) { r.unit = "   B"; }
            else if(f < 1023e3) { r.unit = " KiB"; f /= 1024.0; }
            else if(f < 1023e6) { r.unit = " MiB"; f /= 1048576.0; }
            else if(f < 1023e9) { r.unit = " GiB"; f /= 1073741824.0; }
            else if(f < 1023e12) { r.unit = " TiB"; f /= 1099511627776.0; }
            else if(f < 1023e15) { r.unit = " PiB"; f /= 1125899906842624.0; }
            else { r.unit = " EiB"; f /= 1152921504606846976.0; }
        }
        _ = std.fmt.bufPrintZ(&r.buf, "{d:>5.1}", .{f}) catch unreachable;
        return r;
    fn init(u: [:0]const u8, n: u64, mul: u64, div: u64) FmtSize {
        return .{
            .unit = u,
            .buf = util.fmt5dec(@intCast( ((n*mul) +| (div / 2)) / div )),
        };
    }

    pub fn num(self: *const @This()) [:0]const u8 {
        return std.mem.sliceTo(&self.buf, 0);
    pub fn fmt(v: u64) FmtSize {
        if (main.config.si) {
            if (v < 1000) { return FmtSize.init("  B", v, 10, 1); }
            else if (v < 999_950) { return FmtSize.init(" kB", v, 1, 100); }
            else if (v < 999_950_000) { return FmtSize.init(" MB", v, 1, 100_000); }
            else if (v < 999_950_000_000) { return FmtSize.init(" GB", v, 1, 100_000_000); }
            else if (v < 999_950_000_000_000) { return FmtSize.init(" TB", v, 1, 100_000_000_000); }
            else if (v < 999_950_000_000_000_000) { return FmtSize.init(" PB", v, 1, 100_000_000_000_000); }
            else { return FmtSize.init(" EB", v, 1, 100_000_000_000_000_000); }
        } else {
            // Cutoff values are obtained by calculating 999.949999999999999999999999 * div with an infinite-precision calculator.
            // (Admittedly, this precision is silly)
            if (v < 1000) { return FmtSize.init("   B", v, 10, 1); }
            else if (v < 1023949) { return FmtSize.init(" KiB", v, 10, 1<<10); }
            else if (v < 1048523572) { return FmtSize.init(" MiB", v, 10, 1<<20); }
            else if (v < 1073688136909) { return FmtSize.init(" GiB", v, 10, 1<<30); }
            else if (v < 1099456652194612) { return FmtSize.init(" TiB", v, 10, 1<<40); }
            else if (v < 1125843611847281869) { return FmtSize.init(" PiB", v, 10, 1<<50); }
            else { return FmtSize.init(" EiB", v, 1, (1<<60)/10); }
        }
    }

    pub fn num(self: *const FmtSize) [:0]const u8 {
        return &self.buf;
    }

    fn testEql(self: FmtSize, exp: []const u8) !void {
        var buf: [10]u8 = undefined;
        try std.testing.expectEqualStrings(exp, try std.fmt.bufPrint(&buf, "{s}{s}", .{ self.num(), self.unit }));
    }
};

test "fmtsize" {
    main.config.si = true;
    try FmtSize.fmt(                  0).testEql("  0.0  B");
    try FmtSize.fmt(                999).testEql("999.0  B");
    try FmtSize.fmt(               1000).testEql("  1.0 kB");
    try FmtSize.fmt(               1049).testEql("  1.0 kB");
    try FmtSize.fmt(               1050).testEql("  1.1 kB");
    try FmtSize.fmt(            999_899).testEql("999.9 kB");
    try FmtSize.fmt(            999_949).testEql("999.9 kB");
    try FmtSize.fmt(            999_950).testEql("  1.0 MB");
    try FmtSize.fmt(          1000_000).testEql("  1.0 MB");
    try FmtSize.fmt(        999_850_009).testEql("999.9 MB");
    try FmtSize.fmt(        999_899_999).testEql("999.9 MB");
    try FmtSize.fmt(        999_900_000).testEql("999.9 MB");
    try FmtSize.fmt(        999_949_999).testEql("999.9 MB");
    try FmtSize.fmt(        999_950_000).testEql("  1.0 GB");
    try FmtSize.fmt(        999_999_999).testEql("  1.0 GB");
    try FmtSize.fmt(std.math.maxInt(u64)).testEql(" 18.4 EB");

    main.config.si = false;
    try FmtSize.fmt(                  0).testEql("  0.0   B");
    try FmtSize.fmt(                999).testEql("999.0   B");
    try FmtSize.fmt(               1000).testEql("  1.0 KiB");
    try FmtSize.fmt(               1024).testEql("  1.0 KiB");
    try FmtSize.fmt(             102400).testEql("100.0 KiB");
    try FmtSize.fmt(            1023898).testEql("999.9 KiB");
    try FmtSize.fmt(            1023949).testEql("  1.0 MiB");
    try FmtSize.fmt(         1048523571).testEql("999.9 MiB");
    try FmtSize.fmt(         1048523572).testEql("  1.0 GiB");
    try FmtSize.fmt(      1073688136908).testEql("999.9 GiB");
    try FmtSize.fmt(      1073688136909).testEql("  1.0 TiB");
    try FmtSize.fmt(   1099456652194611).testEql("999.9 TiB");
    try FmtSize.fmt(   1099456652194612).testEql("  1.0 PiB");
    try FmtSize.fmt(1125843611847281868).testEql("999.9 PiB");
    try FmtSize.fmt(1125843611847281869).testEql("  1.0 EiB");
    try FmtSize.fmt(std.math.maxInt(u64)).testEql(" 16.0 EiB");
}
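// (Illustrative sketch, not from the repo: FmtSize.init() above replaces the
// old float formatting with integer fixed-point arithmetic. It converts a
// byte count to tenths of a unit with round-half-up, then hands the result to
// util.fmt5dec(). For the 1050-byte SI case tested above, the kB bucket uses
// mul=1 and div=100:)
test "FmtSize fixed-point rounding" {
    // ((n*mul) + div/2) / div == ((1050*1) + 50) / 100 == 11 tenths, i.e. "1.1 kB".
    try std.testing.expectEqual(@as(u64, 11), ((1050 * 1) + (100 / 2)) / 100);
}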

// Print a formatted human-readable size string onto the given background.
pub fn addsize(bg: Bg, v: u64) void {
    const r = FmtSize.fmt(v);

@@ -586,7 +633,7 @@ pub fn getch(block: bool) i32 {
        }
        if (ch == c.ERR) {
            if (!block) return 0;
            std.time.sleep(10*std.time.ns_per_ms);
            sleep(10*std.time.ns_per_ms);
            continue;
        }
        return ch;

@@ -594,3 +641,50 @@ pub fn getch(block: bool) i32 {
    die("Error reading keyboard input, assuming TTY has been lost.\n(Potentially nonsensical error message: {s})\n",
        .{ c.strerror(@intFromEnum(std.posix.errno(-1))) });
}

fn waitInput() void {
    if (@hasDecl(std.io, "getStdIn")) {
        std.io.getStdIn().reader().skipUntilDelimiterOrEof('\n') catch unreachable;
    } else {
        var buf: [512]u8 = undefined;
        var rd = std.fs.File.stdin().reader(&buf);
        _ = rd.interface.discardDelimiterExclusive('\n') catch unreachable;
    }
}

pub fn runCmd(cmd: []const []const u8, cwd: ?[]const u8, env: *std.process.EnvMap, reporterr: bool) void {
    deinit();
    defer init();

    // NCDU_LEVEL can only count to 9, keeps the implementation simple.
    if (env.get("NCDU_LEVEL")) |l|
        env.put("NCDU_LEVEL", if (l.len == 0) "1" else switch (l[0]) {
            '0'...'8' => |d| &[1]u8{d+1},
            '9' => "9",
            else => "1"
        }) catch unreachable
    else
        env.put("NCDU_LEVEL", "1") catch unreachable;

    var child = std.process.Child.init(cmd, main.allocator);
    child.cwd = cwd;
    child.env_map = env;

    const term = child.spawnAndWait() catch |e| blk: {
        std.debug.print("Error running command: {s}\n\nPress enter to continue.\n", .{ ui.errorString(e) });
        waitInput();
        break :blk std.process.Child.Term{ .Exited = 0 };
    };

    const n = switch (term) {
        .Exited => "error",
        .Signal => "signal",
        .Stopped => "stopped",
        .Unknown => "unknown",
    };
    const v = switch (term) { inline else => |v| v };
    if (term != .Exited or (reporterr and v != 0)) {
        std.debug.print("\nCommand returned with {s} code {}.\nPress enter to continue.\n", .{ n, v });
        waitInput();
    }
}
116 src/util.zig

@@ -2,6 +2,7 @@
// SPDX-License-Identifier: MIT

const std = @import("std");
const c = @import("c.zig").c;

// Cast any integer type to the target type, clamping the value to the supported maximum if necessary.
pub fn castClamp(comptime T: type, x: anytype) T {

@@ -17,8 +18,8 @@ pub fn castClamp(comptime T: type, x: anytype) T {

// Cast any integer type to the target type, truncating if necessary.
pub fn castTruncate(comptime T: type, x: anytype) T {
    const Ti = @typeInfo(T).Int;
    const Xi = @typeInfo(@TypeOf(x)).Int;
    const Ti = @typeInfo(T).int;
    const Xi = @typeInfo(@TypeOf(x)).int;
    const nx: std.meta.Int(Ti.signedness, Xi.bits) = @bitCast(x);
    return if (Xi.bits > Ti.bits) @truncate(nx) else nx;
}

@@ -31,12 +32,43 @@ pub fn blocksToSize(b: u64) u64 {
// Ensure the given arraylist buffer gets zero-terminated and returns a slice
// into the buffer. The returned buffer is invalidated whenever the arraylist
// is freed or written to.
pub fn arrayListBufZ(buf: *std.ArrayList(u8)) [:0]const u8 {
    buf.append(0) catch unreachable;
pub fn arrayListBufZ(buf: *std.ArrayListUnmanaged(u8), alloc: std.mem.Allocator) [:0]const u8 {
    buf.append(alloc, 0) catch unreachable;
    defer buf.items.len -= 1;
    return buf.items[0..buf.items.len-1:0];
}
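// (Illustrative sketch, not from the repo: usage of the new unmanaged
// signature. The returned slice aliases the list's buffer, with the NUL kept
// just past the logical length:)
test "arrayListBufZ usage" {
    var buf: std.ArrayListUnmanaged(u8) = .empty;
    defer buf.deinit(std.testing.allocator);
    buf.appendSlice(std.testing.allocator, "abc") catch unreachable;
    const s = arrayListBufZ(&buf, std.testing.allocator);
    try std.testing.expectEqualStrings("abc", s);
    try std.testing.expectEqual(@as(u8, 0), s[s.len]); // the sentinel NUL
}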

// Format an integer as right-aligned '###.#'.
// Pretty much equivalent to:
//   std.fmt.bufPrintZ(.., "{d:>5.1}", @floatFromInt(n)/10.0);
// Except this function doesn't pull in large float formatting tables.
pub fn fmt5dec(n: u14) [5:0]u8 {
    std.debug.assert(n <= 9999);
    var buf: [5:0]u8 = "  0.0".*;
    var v = n;
    buf[4] += @intCast(v % 10);
    v /= 10;
    buf[2] += @intCast(v % 10);
    v /= 10;
    if (v == 0) return buf;
    buf[1] = '0' + @as(u8, @intCast(v % 10));
    v /= 10;
    if (v == 0) return buf;
    buf[0] = '0' + @as(u8, @intCast(v));
    return buf;
}

test "fmt5dec" {
    const eq = std.testing.expectEqualStrings;
    try eq("  0.0", &fmt5dec(0));
    try eq("  0.5", &fmt5dec(5));
    try eq("  9.5", &fmt5dec(95));
    try eq(" 12.5", &fmt5dec(125));
    try eq("123.9", &fmt5dec(1239));
    try eq("999.9", &fmt5dec(9999));
}


// Straightforward Zig port of strnatcmp() from https://github.com/sourcefrog/natsort/
// (Requiring nul-terminated strings is ugly, but we've got them anyway and it does simplify the code)
pub fn strnatcmp(a: [:0]const u8, b: [:0]const u8) std.math.Order {

@@ -139,3 +171,79 @@ test "strnatcmp" {
        for (i+1..w.len) |j| try eq(strnatcmp(w[i], w[j]), .lt);
    }
}


pub fn expanduser(path: []const u8, alloc: std.mem.Allocator) ![:0]u8 {
    if (path.len == 0 or path[0] != '~') return alloc.dupeZ(u8, path);

    const len = std.mem.indexOfScalar(u8, path, '/') orelse path.len;
    const home_raw = blk: {
        const pwd = pwd: {
            if (len == 1) {
                if (std.posix.getenvZ("HOME")) |p| break :blk p;
                break :pwd c.getpwuid(c.getuid());
            } else {
                const name = try alloc.dupeZ(u8, path[1..len]);
                defer alloc.free(name);
                break :pwd c.getpwnam(name.ptr);
            }
        };
        if (pwd != null)
            if (@as(*c.struct_passwd, pwd).pw_dir) |p|
                break :blk std.mem.span(p);
        return alloc.dupeZ(u8, path);
    };
    const home = std.mem.trimRight(u8, home_raw, "/");

    if (home.len == 0 and path.len == len) return alloc.dupeZ(u8, "/");
    return try std.mem.concatWithSentinel(alloc, u8, &.{ home, path[len..] }, 0);
}
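// (Hypothetical usage, not from the repo: the result depends on $HOME or the
// passwd database, so this is only a sketch:)
fn printConfigPath(alloc: std.mem.Allocator) !void {
    const p = try expanduser("~/.config/ncdu/config", alloc);
    defer alloc.free(p);
    std.debug.print("would read: {s}\n", .{p});
}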

// Silly abstraction to read a file one line at a time. Only exists to help
// with supporting both Zig 0.14 and 0.15, can be removed once 0.14 support is
// dropped.
pub const LineReader = if (@hasDecl(std.io, "bufferedReader")) struct {
    rd: std.io.BufferedReader(4096, std.fs.File.Reader),
    fbs: std.io.FixedBufferStream([]u8),

    pub fn init(f: std.fs.File, buf: []u8) @This() {
        return .{
            .rd = std.io.bufferedReader(f.reader()),
            .fbs = std.io.fixedBufferStream(buf),
        };
    }

    pub fn read(s: *@This()) !?[]u8 {
        s.fbs.reset();
        s.rd.reader().streamUntilDelimiter(s.fbs.writer(), '\n', s.fbs.buffer.len) catch |err| switch (err) {
            error.EndOfStream => if (s.fbs.getPos() catch unreachable == 0) return null,
            else => |e| return e,
        };
        return s.fbs.getWritten();
    }

} else struct {
    rd: std.fs.File.Reader,

    pub fn init(f: std.fs.File, buf: []u8) @This() {
        return .{ .rd = f.readerStreaming(buf) };
    }

    pub fn read(s: *@This()) !?[]u8 {
        // Can't use takeDelimiter() because that's not available in 0.15.1,
        // Can't use takeDelimiterExclusive() because that changed behavior in 0.15.2.
        const r = &s.rd.interface;
        const result = r.peekDelimiterInclusive('\n') catch |err| switch (err) {
            error.EndOfStream => {
                const remaining = r.buffer[r.seek..r.end];
                if (remaining.len == 0) return null;
                r.toss(remaining.len);
                return remaining;
            },
            else => |e| return e,
        };
        r.toss(result.len);
        return result[0 .. result.len - 1];
    }
};
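// (Illustrative sketch, not from the repo: LineReader only needs an open file
// and a scratch buffer, and read() yields lines without the trailing newline,
// papering over the Zig 0.14/0.15 reader API split:)
fn countLines(path: []const u8) !usize {
    const f = try std.fs.cwd().openFile(path, .{});
    defer f.close();
    var buf: [4096]u8 = undefined;
    var rd = LineReader.init(f, &buf);
    var n: usize = 0;
    while (try rd.read()) |_| n += 1;
    return n;
}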