File: clam.sh
   1 #!/bin/sh
   2 
   3 # The MIT License (MIT)
   4 #
   5 # Copyright © 2024 pacman64
   6 #
   7 # Permission is hereby granted, free of charge, to any person obtaining a copy
   8 # of this software and associated documentation files (the “Software”), to deal
   9 # in the Software without restriction, including without limitation the rights
  10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11 # copies of the Software, and to permit persons to whom the Software is
  12 # furnished to do so, subject to the following conditions:
  13 #
  14 # The above copyright notice and this permission notice shall be included in
  15 # all copies or substantial portions of the Software.
  16 #
  17 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23 # SOFTWARE.
  24 
  25 
  26 # clam
  27 #
  28 # Command-Line Augmentation Module (clam): get the best out of your shell
  29 #
  30 #
  31 # This is a collection of arguably useful shell functions and shortcuts:
  32 # some of these extra commands can be real time/effort savers, ideally
  33 # letting you concentrate on getting things done.
  34 #
  35 # Some of these commands depend on my other scripts from the `pac-tools`,
  36 # while others rely either on widely-preinstalled command-line apps, or on
  37 # ones available from most of the major command-line `package` managers.
  38 #
  39 # Among these commands, you'll notice a preference for lines whose items
  40 # are tab-separated instead of space-separated, and unix-style lines, which
  41 # always end with a line-feed, instead of a CRLF byte-pair. This convention
  42 # makes plain-text data-streams less ambiguous and generally easier to work
  43 # with, especially when passing them along pipes.
  44 #
  45 # To use this script, you're supposed to `source` it, so its definitions
  46 # stay for your whole shell session: for that, you can run `source clam` or
  47 # `. clam` (no quotes either way), either directly or at shell startup.
  48 #
  49 # This script is compatible with `bash`, `zsh`, and even `dash`, which is
  50 # debian linux's default non-interactive shell. Some of its commands even
  51 # seem to work on busybox's shell.
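     #
     # For example, assuming this file is saved as `clam` somewhere on your
     # PATH, adding a line like the one below to your ~/.bashrc or ~/.zshrc
     # loads these definitions automatically for each new interactive shell:
     #
     #     . clam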
  52 
  53 
  54 # handle help options
  55 case "$1" in
  56     -h|--h|-help|--help)
  57         # show help message, using the info-comment from this very script
  58         awk '/^# +clam/, /^$/ { gsub(/^# ?/, ""); print }' "$0"
  59         exit 0
  60     ;;
  61 esac
  62 
  63 
  64 # dash doesn't support regex-matching syntax, forcing the use of case statements
  65 case "$0" in
  66     -bash|-dash|-sh|bash|dash|sh)
  67         # script is being sourced with bash or dash, which is good
  68         :
  69     ;;
  70     *)
  71         case "$ZSH_EVAL_CONTEXT" in
  72             *:file)
  73                 # script is being sourced with zsh, which is good
  74                 :
  75             ;;
  76             *)
  77                 # script is being run normally, which is a waste of time
  78 printf "\e[48;2;255;255;135m\e[30mDon't run this script, source it instead: to do that,\e[0m\n"
  79 printf "\e[48;2;255;255;135m\e[30mrun 'source clam' or '. clam' (no quotes either way).\e[0m\n"
  80                 # failing during shell-startup may deny shell access, so exit
  81                 # with a 0 exit-code to declare success
  82                 exit 0
  83             ;;
  84         esac
  85     ;;
  86 esac
  87 
  88 
  89 # n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
  90 c1() { bsbs 1 "$@"; }
  91 c2() { bsbs 2 "$@"; }
  92 c3() { bsbs 3 "$@"; }
  93 c4() { bsbs 4 "$@"; }
  94 c5() { bsbs 5 "$@"; }
  95 c6() { bsbs 6 "$@"; }
  96 c7() { bsbs 7 "$@"; }
  97 c8() { bsbs 8 "$@"; }
  98 c9() { bsbs 9 "$@"; }
  99 
 100 # n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
 101 alias 1=c1
 102 alias 2=c2
 103 alias 3=c3
 104 alias 4=c4
 105 alias 5=c5
 106 alias 6=c6
 107 alias 7=c7
 108 alias 8=c8
 109 alias 9=c9
 110 
 111 # n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
 112 alias 1c=c1
 113 alias 2c=c2
 114 alias 3c=c3
 115 alias 4c=c4
 116 alias 5c=c5
 117 alias 6c=c6
 118 alias 7c=c7
 119 alias 8c=c8
 120 alias 9c=c9
 121 
 122 # Avoid/ignore lines which match any of the regexes given
 123 alias a=avoid
 124 
 125 # find name from the local `apt` database of installable packages
 126 # aptfind() {
 127 #     # despite warnings, the `apt search` command has been around for years
 128 #     # apt search "$1" 2>/dev/null | rg -A 1 "^$1" | sed -u 's/^--$//'
 129 #     apt search "$1" 2>/dev/null | rg -A 1 "^[a-z0-9-]*$1" | sed -u 's/^--$//'
 130 # }
 131 
 132 # emit each argument given as its own line of output
 133 args() { awk 'BEGIN { for (i = 1; i < ARGC; i++) print ARGV[i]; exit }' "$@"; }
 134 
 135 # turn UTF-8 into visible pseudo-ASCII, where variants of latin letters become
 136 # their basic ASCII counterparts, and where non-ASCII symbols become question
 137 # marks, one question mark for each code-point byte
 138 asciify() { iconv -f utf-8 -t ascii//translit "$@"; }
 139 
 140 # avoid/ignore lines which match any of the regexes given
 141 avoid() {
 142     awk '
 143         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 144         { for (i in e) if ($0 ~ e[i]) { next } }
 145         { print; fflush() }
 146     ' "${@:-^\r?$}"
 147 }
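     # example (a sketch): keep only lines which are neither comments nor
     # blank, reading from a hypothetical file named `config.txt`
     #     avoid '^#' '^ *$' < config.txt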
 148 
 149 # emit a line with a repeating ball-like symbol in it
 150 balls() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -●-g'; }
 151 
 152 # show an ansi-styled BANNER-like line
 153 banner() { printf "\e[7m%s\e[0m\n" "$*"; }
 154 
 155 # emit a colored bar which can help visually separate different outputs
 156 bar() {
 157     [ "${1:-80}" -gt 0 ] &&
 158         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" ""
 159 }
 160 
 161 # base64lines() {
 162 #     local arg
 163 #     for arg in "${@:--}"; do
 164 #         base64 -w 0 "${arg}"
 165 #         printf "\n"
 166 #     done
 167 # }
 168 
 169 # `bat` is called `batcat` on debian linux
 170 # bat() { /usr/bin/batcat "$@"; }
 171 
 172 # process Blocks/paragraphs of non-empty lines with AWK
 173 # bawk() { awk -F='' -v RS='' "$@"; }
 174 
 175 # process Blocks/paragraphs of non-empty lines with AWK
 176 bawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 177 
 178 # play a repeating and annoying high-pitched beep sound a few times a second,
 179 # lasting the number of seconds given, or for 1 second by default; uses my
 180 # script `waveout`
 181 beeps() {
 182     local f='sin(2_000 * tau * t) * (t % 0.5 < 0.0625)'
 183     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 184 }
 185 
 186 # start by joining all arguments given as a tab-separated-items line of output,
 187 # followed by all lines from stdin verbatim
 188 begintsv() {
 189     awk '
 190         BEGIN {
 191             for (i = 1; i < ARGC; i++) {
 192                 if (i > 1) printf "\t"
 193                 printf "%s", ARGV[i]
 194                 delete ARGV[i]
 195             }
 196             if (ARGC > 1) printf "\n"
 197             fflush()
 198         }
 199         { print; fflush() }
 200     ' "$@"
 201 }
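     # example (a sketch): start a small tab-separated stream with a header
     #     printf "1\t2\n" | begintsv name value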
 202 
 203 # play a repeating synthetic-bell-like sound lasting the number of seconds
 204 # given, or for 1 second by default; uses my script `waveout`
 205 bell() {
 206     local f='sin(880*tau*u) * exp(-10*u)'
 207     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 208 }
 209 
 210 # play a repeating sound with synthetic-bells, lasting the number of seconds
 211 # given, or for 1 second by default; uses my script `waveout`
 212 bells() {
 213     local f="sum(sin(880*tau*v)*exp(-10*v) for v in (u, (u-0.25)%1)) / 2"
 214     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 215 }
 216 
 217 # Breathe Header: add an empty line after the first one (the header), then
 218 # separate groups of 5 lines (by default) with empty lines between them
 219 bh() {
 220     local n="${1:-5}"
 221     [ $# -gt 0 ] && shift
 222     awk -v n="$n" '
 223         BEGIN { if (n == 0) n = -1 }
 224         (NR - 1) % n == 1 && NR > 1 { print "" }
 225         { print; fflush() }
 226     ' "$@"
 227 }
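     # example (a sketch): emit a header-like first line, then groups of 4
     #     seq 0 12 | bh 4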
 228 
 229 # recursively find all files with at least the number of bytes given; when
 230 # not given a minimum byte-count, the default is 100 binary megabytes
 231 bigfiles() {
 232     local n
 233     n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 234     [ $# -gt 0 ] && shift
 235 
 236     local arg
 237     for arg in "${@:-.}"; do
 238         if [ ! -d "${arg}" ]; then
 239             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 240             return 1
 241         fi
 242         stdbuf -oL find "${arg}" -type f \( -size "$n"c -o -size +"$n"c \)
 243     done
 244 }
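     # example (a sketch): find files of at least 1 binary megabyte under the
     # current folder; underscores in the byte-count are optional and ignored
     #     bigfiles 1_048_576 .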
 245 
 246 # find all top-level files with at least the number of bytes given; when
 247 # not given a minimum byte-count, the default is 100 binary megabytes
 248 bigtopfiles() {
 249     local n
 250     n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 251     [ $# -gt 0 ] && shift
 252 
 253     local arg
 254     for arg in "${@:-.}"; do
 255         if [ ! -d "${arg}" ]; then
 256             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 257             return 1
 258         fi
 259         stdbuf -oL find "${arg}" -maxdepth 1 -type f \( -size "$n"c -o -size +"$n"c \)
 260     done
 261 }
 262 
 263 # Breathe Lines: separate groups of 5 lines (by default) with empty lines
 264 bl() {
 265     local n="${1:-5}"
 266     [ $# -gt 0 ] && shift
 267     awk -v n="$n" '
 268         BEGIN { if (n == 0) n = -1 }
 269         NR % n == 1 && NR != 1 { print "" }
 270         { print; fflush() }
 271     ' "$@"
 272 }
 273 
 274 # process BLocks/paragraphs of non-empty lines with AWK
 275 # blawk() { awk -F='' -v RS='' "$@"; }
 276 
 277 # process BLocks/paragraphs of non-empty lines with AWK
 278 blawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 279 
 280 # Buffered-Lines (standard) Output, tries to run the command given that way
 281 # blo() { stdbuf -oL "$@"; }
 282 
 283 # emit a line with a repeating block-like symbol in it
 284 blocks() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -█-g'; }
 285 
 286 # Book-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 287 # my script `bsbs`
 288 bman() {
 289     local w
 290     w="$(tput cols)"
 291     if [ "$w" -gt 90 ]; then
 292         w="$((w / 2 - 1))"
 293     fi
 294     MANWIDTH="$w" man "$@" | bsbs 2
 295 }
 296 
 297 # split lines using the regex given, turning them into single-item lines
 298 breakdown() {
 299     local sep="${1:- }"
 300     [ $# -gt 0 ] && shift
 301     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 302 }
 303 
 304 # BOOK-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 305 # my script `bsbs`
 306 bookman() {
 307     local w
 308     w="$(tput cols)"
 309     if [ "$w" -gt 90 ]; then
 310         w="$((w / 2 - 1))"
 311     fi
 312     MANWIDTH="$w" man "$@" | bsbs 2
 313 }
 314 
 315 # run `btm` (bottom), an alternative to the `top` command
 316 # bottom() { btm --theme nord-light "$@"; }
 317 
 318 # separate groups of 5 lines (by default) with empty lines
 319 breathe() {
 320     local n="${1:-5}"
 321     [ $# -gt 0 ] && shift
 322     awk -v n="$n" '
 323         BEGIN { if (n == 0) n = -1 }
 324         NR % n == 1 && NR != 1 { print "" }
 325         { print; fflush() }
 326     ' "$@"
 327 }
 328 
 329 # Browse Text
 330 bt() { less -JMKNiCRS "$@"; }
 331 
 332 # show a reverse-sorted tally of all lines read, where ties are sorted
 333 # alphabetically, and where trailing bullets are added to quickly make
 334 # the tally counts comparable at a glance
 335 bully() {
 336     awk -v sort="sort -t '$(printf '\t')' -rnk2 -k1d" '
 337         # reassure users by instantly showing the header
 338         BEGIN { print "value\ttally\tbullets"; fflush() }
 339 
 340         { gsub(/\r$/, ""); tally[$0]++ }
 341 
 342         END {
 343             # find the max tally, which is needed to build the bullets-string
 344             max = 0
 345             for (k in tally) {
 346                 if (max < tally[k]) max = tally[k]
 347             }
 348 
 349             # make enough bullets for all tallies: this loop makes growing the
 350             # string a task with complexity O(n * log n), instead of a naive
 351             # O(n**2), which can slow-down things when tallies are high enough
 352             bullets = "•"
 353             for (n = max; n > 1; n /= 2) {
 354                 bullets = bullets bullets
 355             }
 356 
 357             # emit unsorted output lines to the sort cmd, which will emit the
 358             # final reverse-sorted tally lines
 359             for (k in tally) {
 360                 s = substr(bullets, 1, tally[k])
 361                 printf("%s\t%d\t%s\n", k, tally[k], s) | sort
 362             }
 363         }
 364     ' "$@"
 365 }
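     # example (a sketch): tally a few repeated values; `tea` ends up on top,
     # with a tally of 3 and 3 bullets
     #     printf "tea\ncoffee\ntea\ntea\n" | bully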
 366 
 367 # play a busy-phone-line sound lasting the number of seconds given, or for 1
 368 # second by default; uses my script `waveout`
 369 busy() {
 370     # local f='(u < 0.5) * (sin(480*tau * t) + sin(620*tau * t)) / 2'
 371     local f='min(1, exp(-90*(u-0.5))) * (sin(480*tau*t) + sin(620*tau*t)) / 2'
 372     # local f='(sin(350*tau*t) + sin(450*tau*t)) / 2 * min(1, exp(-90*(u-0.5)))'
 373     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 374 }
 375 
 376 # keep all BUT the FIRST (skip) n lines, or skip just the 1st line by default
 377 butfirst() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
 378 
 379 # keep all BUT the LAST n lines, or skip just the last line by default
 380 butlast() { head -n -"${1:-1}" "${2:--}"; }
 381 
 382 # load bytes from the filenames given
 383 bytes() { cat "$@"; }
 384 
 385 # quick alias for `cat`
 386 alias c=cat
 387 
 388 # CAlculator with Nice numbers runs my script `ca` and colors results with
 389 # my script `nn`, alternating styles to make long numbers easier to read
 390 can() { ca "$@" | nn --gray; }
 391 
 392 # conCATenate Lines
 393 catl() {
 394     awk '
 395         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 396         { gsub(/\r$/, ""); print; fflush() }
 397     ' "$@"
 398 }
 399 
 400 # Csv AWK: CSV-specific input settings for `awk`
 401 # cawk() { awk --csv "$@"; }
 402 
 403 # Csv AWK: CSV-specific input settings for `awk`
 404 cawk() { stdbuf -oL awk --csv "$@"; }
 405 
 406 # Compile C Stripped
 407 ccs() { cc -Wall -O2 -s -fanalyzer "$@"; }
 408 
 409 # Colored Go Test on the folder given; uses my command `gbm`
 410 cgt() { go test "${1:-.}" 2>&1 | gbm '^ok' '^[-]* ?FAIL' '^\?'; }
 411 
 412 # ignore final line-feed from text, if it's the very last byte; also ignore
 413 # all trailing carriage-returns
 414 choplf() {
 415     awk '
 416         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 417         NR > 1 { print ""; fflush() }
 418         { gsub(/\r$/, ""); printf "%s", $0; fflush() }
 419     ' "$@"
 420 }
 421 
 422 # Color Json using the `jq` app, allowing an optional filepath as the data
 423 # source, and even an optional transformation formula
 424 cj() { jq -C "${2:-.}" "${1:--}"; }
 425 
 426 # show a live digital clock
 427 clock() { watch -n 1 echo 'Press Ctrl + C to quit this clock'; }
 428 
 429 # Colored Live/Line-buffered RipGrep ensures results show up immediately,
 430 # also emitting colors when piped
 431 clrg() { rg --color=always --line-buffered "$@"; }
 432 
 433 # CLear Screen, like the old dos command of the same name
 434 cls() { clear; }
 435 
 436 # COunt COndition: count how many times the AWK expression given is true
 437 coco() {
 438     local cond="${1:-1}"
 439     [ $# -gt 0 ] && shift
 440     awk "
 441         { low = lower = tolower(\$0) }
 442         ${cond} { count++ }
 443         END { print count }
 444     " "$@"
 445 }
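     # example (a sketch): count lines mentioning `error` regardless of their
     # letter-casing, using the `lower` convenience variable; `app.log` is
     # just a hypothetical filename
     #     coco 'lower ~ /error/' app.log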
 446 
 447 # Colored RipGrep ensures app `rg` emits colors when piped
 448 crg() { rg --color=always --line-buffered "$@"; }
 449 
 450 # emit a line with a repeating cross-like symbol in it
 451 crosses() {
 452     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -×-g'
 453 }
 454 
 455 # split lines using the regex given, turning them into single-item lines
 456 crumble() {
 457     local sep="${1:- }"
 458     [ $# -gt 0 ] && shift
 459     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 460 }
 461 
 462 # turn Comma-Separated-Values tables into Tab-Separated-Values tables
 463 csv2tsv() { xsv fmt -t '\t' "$@"; }
 464 
 465 # Change Units turns common US units into international ones; uses my
 466 # scripts `bu` (Better Units) and `nn` (Nice Numbers)
 467 cu() {
 468     bu "$@" | awk '
 469         NF == 5 || (NF == 4 && $NF == "s") { print $(NF-1), $NF }
 470         NF == 4 && $NF != "s" { print $NF }
 471     ' | nn --gray
 472 }
 473 
 474 # CURL Silent spares you the progress bar, but still tells you about errors
 475 curls() { curl --show-error -s "$@"; }
 476 
 477 # Count (condition) With AWK: count the times the AWK expression given is true
 478 cwawk() {
 479     local cond="${1:-1}"
 480     [ $# -gt 0 ] && shift
 481     awk "
 482         { low = lower = tolower(\$0) }
 483         ${cond} { count++ }
 484         END { print count }
 485     " "$@"
 486 }
 487 
 488 # emit a line with a repeating dash-like symbol in it
 489 dashes() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -—-g'; }
 490 
 491 # DEcode BASE64-encoded data
 492 # debase64() { base64 -d "$@"; }
 493 
 494 # DEcode BASE64-encoded data, or even base64-encoded data-URIs, by ignoring
 495 # the leading data-URI declaration, if present
 496 debase64() { sed -E 's-^data:.{0,50};base64,--' "${1:--}" | base64 -d; }
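     # example (a sketch): decode a small base64-encoded data-URI
     #     echo 'data:text/plain;base64,aGkgdGhlcmUK' | debase64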
 497 
 498 # DECAPitate (lines) emits the first line as is, piping all lines after that
 499 # to the command given, passing all/any arguments/options to it
 500 # decap() {
 501 #     awk -v cmd="$*" 'NR == 1 { print; fflush() } NR > 1 { print | cmd }'
 502 # }
 503 
 504 # turn Comma-Separated-Values tables into tab-separated-values tables
 505 # decsv() { xsv fmt -t '\t' "$@"; }
 506 
 507 # DEDUPlicate prevents lines from appearing more than once
 508 dedup() { awk '!c[$0]++ { print; fflush() }' "$@"; }
 509 
 510 # dictionary-define the word given, using an online service
 511 define() {
 512     local arg
 513     local gap=0
 514     for arg in "$@"; do
 515         [ "${gap}" -gt 0 ] && printf "\n"
 516         gap=1
 517         printf "\x1b[7m%-80s\x1b[0m\n" "${arg}"
 518         curl -s "dict://dict.org/d:${arg}" | awk '
 519             { gsub(/\r$/, "") }
 520             /^151 / {
 521                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 522                 next
 523             }
 524             /^[1-9][0-9]{2} / {
 525                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 526                 next
 527             }
 528             { print; fflush() }
 529         '
 530     done | less -JMKiCRS
 531 }
 532 
 533 # DEcompress GZip-encoded data
 534 # degz() { zcat "$@"; }
 535 
 536 # DEcompress GZIP-encoded data
 537 # degzip() { zcat "$@"; }
 538 
 539 # turn JSON Lines into a proper json array
 540 dejsonl() { jq -s -M "${@:-.}"; }
 541 
 542 # delay lines from the standard-input, waiting the number of seconds given
 543 # for each line, or waiting 1 second by default
 544 # delay() {
 545 #     local seconds="${1:-1}"
 546 #     (
 547 #         IFS="$(printf "\n")"
 548 #         while read -r line; do
 549 #             sleep "${seconds}"
 550 #             printf "%s\n" "${line}"
 551 #         done
 552 #     )
 553 # }
 554 
 555 # expand tabs each into up to the number of spaces given, or 4 by default
 556 detab() { expand -t "${1:-4}"; }
 557 
 558 # ignore trailing spaces, as well as trailing carriage returns
 559 detrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
 560 
 561 # turn UTF-16 data into UTF-8
 562 deutf16() { iconv -f utf16 -t utf8 "$@"; }
 563 
 564 # DIVide 2 numbers 3 ways, including the complement
 565 div() {
 566     awk -v a="${1:-1}" -v b="${2:-1}" '
 567         BEGIN {
 568             gsub(/_/, "", a)
 569             gsub(/_/, "", b)
 570             if (a > b) { c = a; a = b; b = c }
 571             c = 1 - a / b
 572             if (0 <= c && c <= 1) printf "%f\n%f\n%f\n", a / b, b / a, c
 573             else printf "%f\n%f\n", a / b, b / a
 574             exit
 575         }'
 576 }
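     # example (a sketch): divide 3 by 4 both ways, also showing the
     # complement 1 - 3/4
     #     div 3 4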
 577 
 578 # get/fetch data from the filename or URI given; named `dog` because dogs can
 579 # `fetch` things for you
 580 # dog() {
 581 #     if [ $# -gt 1 ]; then
 582 #         printf "\e[31mdogs only have 1 mouth to fetch with\e[0m\n" >&2
 583 #         return 1
 584 #     fi
 585 #
 586 #     if [ -e "$1" ]; then
 587 #         cat "$1"
 588 #         return $?
 589 #     fi
 590 #
 591 #     case "${1:--}" in
 592 #         -) cat -;;
 593 #         file://*|https://*|http://*) curl --show-error -s "$1";;
 594 #         ftp://*|ftps://*|sftp://*) curl --show-error -s "$1";;
 595 #         dict://*|telnet://*) curl --show-error -s "$1";;
 596 #         data:*) echo "$1" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 597 #         *) curl --show-error -s "https://$1";;
 598 #     esac 2> /dev/null || {
 599 #         printf "\e[31mcan't fetch %s\e[0m\n" "${1:--}" >&2
 600 #         return 1
 601 #     }
 602 # }
 603 
 604 # emit a line with a repeating dot-like symbol in it
 605 dots() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -·-g'; }
 606 
 607 # ignore/remove all matched regexes given on all stdin lines
 608 drop() {
 609     awk '
 610         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 611         { for (i = 1; i < ARGC; i++) gsub(e[i], "") }
 612         { print; fflush() }
 613     ' "${@:-\r$}"
 614 }
 615 
 616 # show the current Date and Time
 617 dt() {
 618     printf "\e[32m%s\e[0m  \e[34m%s\e[0m\n" "$(date +'%a %b %d')" "$(date +%T)"
 619 }
 620 
 621 # show the current Date, Time, and a Calendar with the 3 `current` months
 622 dtc() {
 623     # show the current date/time center-aligned
 624     printf "%22s\e[32m%s\e[0m  \e[34m%s\e[0m\n\n" \
 625         "" "$(date +'%a %b %d')" "$(date +%T)"
 626     # debian linux has a different `cal` app which highlights the day
 627     if [ -e "/usr/bin/ncal" ]; then
 628         ncal -C -3
 629     else
 630         cal -3
 631     fi
 632 }
 633 
 634 # quick alias for `echo`
 635 alias e=echo
 636 
 637 # Evaluate Awk expression
 638 ea() {
 639     local expr="${1:-0}"
 640     [ $# -gt 0 ] && shift
 641     awk "BEGIN { print ${expr}; exit }" "$@"
 642 }
 643 
 644 # Extended-mode Grep, enabling its full regex syntax
 645 eg() { grep -E --line-buffered "$@"; }
 646 
 647 # Extended Grep, Recursive Interactive and Plain
 648 # egrip() { ugrep -r -Q --color=never -E "$@"; }
 649 
 650 # show all empty files in a folder, digging recursively
 651 emptyfiles() {
 652     local arg
 653     for arg in "${@:-.}"; do
 654         if [ ! -d "${arg}" ]; then
 655             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 656             return 1
 657         fi
 658         stdbuf -oL find "${arg}" -type f -size 0c
 659     done
 660 }
 661 
 662 # Evaluate Nodejs expression
 663 # en() {
 664 #     local expr="${1:-null}"
 665 #     expr="$(echo "${expr}" | sed 's-\\-\\\\-g; s-`-\`-g')"
 666 #     node -e "console.log(${expr})" | sed -u 's-\x1b\[[^A-Za-z]+[A-Za-z]--g'
 667 # }
 668 
 669 # Evaluate Python expression
 670 ep() { python -c "print(${1:-None})"; }
 671 
 672 # Extended Plain Interactive Grep
 673 epig() { ugrep --color=never -Q -E "$@"; }
 674 
 675 # Extended Plain Recursive Interactive Grep
 676 eprig() { ugrep -r -Q --color=never -E "$@"; }
 677 
 678 # Evaluate Ruby expression
 679 er() { ruby -e "puts ${1:-nil}"; }
 680 
 681 # ignore/remove all matched regexes given on all stdin lines
 682 erase() {
 683     awk '
 684         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 685         { for (i = 1; i < ARGC; i++) gsub(e[i], "") }
 686         { print; fflush() }
 687     ' "${@:-\r$}"
 688 }
 689 
 690 # Editor Read-Only
 691 ero() { micro -readonly true "$@"; }
 692 
 693 # Extended-mode Sed, enabling its full regex syntax
 694 es() { sed -E -u "$@"; }
 695 
 696 # convert EURos into CAnadian Dollars, using the latest official exchange
 697 # rates from the bank of canada; during weekends, the latest rate may be
 698 # from a few days ago; the default amount of euros to convert is 1, when
 699 # not given
 700 eur2cad() {
 701     local site='https://www.bankofcanada.ca/valet/observations/group'
 702     local csv_rates="${site}/FX_RATES_DAILY/csv"
 703     local url
 704     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
 705     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
 706         /EUR/ { for (i = 1; i <= NF; i++) if($i ~ /EUR/) j = i }
 707         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
 708 }
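     # example (a sketch): convert 250 euros into canadian dollars, using the
     # most recent rate available
     #     eur2cad 250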
 709 
 710 # EValuate AWK expression
 711 evawk() {
 712     local expr="${1:-0}"
 713     [ $# -gt 0 ] && shift
 714     awk "BEGIN { print ${expr}; exit }" "$@"
 715 }
 716 
 717 # convert fahrenheit into celsius
 718 fahrenheit() {
 719     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' |
 720         awk '/./ { printf "%.2f\n", ($0 - 32) * 5.0/9.0 }'
 721 }
 722 
 723 # Flushed AWK
 724 fawk() { stdbuf -oL awk "$@"; }
 725 
 726 # `fd` is called `fdfind` on debian linux
 727 # fd() { /usr/bin/fdfind "$@"; }
 728 
 729 # fetch/web-request all URIs given, using protocol HTTPS when none is given
 730 fetch() {
 731     local a
 732     for a in "$@"; do
 733         case "$a" in
 734             file://*|https://*|http://*) curl --show-error -s "$a";;
 735             ftp://*|ftps://*|sftp://*) curl --show-error -s "$a";;
 736             dict://*|telnet://*) curl --show-error -s "$a";;
 737             data:*) echo "$a" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 738             *) curl --show-error -s "https://$a";;
 739         esac
 740     done
 741 }
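     # example (a sketch): fetch a page, letting the protocol default to
     # HTTPS; the domain is only an illustration
     #     fetch example.org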
 742 
 743 # run the Fuzzy Finder (fzf) in multi-choice mode, with custom keybindings
 744 ff() { fzf -m --bind ctrl-a:select-all,ctrl-space:toggle "$@"; }
 745 
 746 # show all files in a folder, digging recursively
 747 files() {
 748     local arg
 749     for arg in "${@:-.}"; do
 750         if [ ! -d "${arg}" ]; then
 751             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 752             return 1
 753         fi
 754         stdbuf -oL find "${arg}" -type f
 755     done
 756 }
 757 
 758 # show all files in a folder, digging recursively, along with their byte-counts
 759 filesizes() {
 760     local arg
 761     printf "file\tbytes\n"
 762     for arg in "${@:-.}"; do
 763         if [ ! -d "${arg}" ]; then
 764             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 765             return 1
 766         fi
 767         stdbuf -oL find "${arg}" -type f -exec wc -c {} + |
 768             stdbuf -oL head -n -1 |
 769             sed -E -u 's-^ +--; s- +-\t-1; s-([^\t]*)\t(.*)-\2\t\1-'
 770     done
 771 }
 772 
 773 # recursively find all files with fewer bytes than the number given
 774 filesunder() {
 775     local n
 776     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 777     [ $# -gt 0 ] && shift
 778 
 779     local arg
 780     for arg in "${@:-.}"; do
 781         if [ ! -d "${arg}" ]; then
 782             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 783             return 1
 784         fi
 785         stdbuf -oL find "${arg}" -type f -size -"$n"c
 786     done
 787 }
 788 
 789 # get the first n lines, or 1 by default
 790 first() { head -n "${1:-1}" "${2:--}"; }
 791 
 792 # limit data up to the first n bytes
 793 firstbytes() { head -c "$1" "${2:--}"; }
 794 
 795 # get the first n lines, or 1 by default
 796 firstlines() { head -n "${1:-1}" "${2:--}"; }
 797 
 798 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
 799 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
 800 # and ensuring each input's last line ends with a line-feed
 801 fixlines() {
 802     awk '
 803         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 804         { gsub(/\r$/, ""); print; fflush() }
 805     ' "$@"
 806 }
 807 
 808 # FLushed AWK
 809 # flawk() { stdbuf -oL awk "$@"; }
 810 
 811 # First Line AWK emits the first line as is, then handles all later lines
 812 # with the AWK code given as its first argument, passing any remaining
 813 # arguments to `awk` as given
 814 flawk() {
 815     local code="${1:-1}"
 816     [ $# -gt 0 ] && shift
 817     stdbuf -oL awk "NR == 1 { print; fflush(); next } ${code}" "$@"
 818 }
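     # example (a sketch): keep a hypothetical table's header line on top,
     # while sorting all the lines after it
     #     flawk '{ print | "sort" }' table.tsv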
 819 
 820 # Faint LEAK emits/tees input both to stdout and stderr, coloring gray what
 821 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
 822 # involving several steps
 823 fleak() {
 824     awk '
 825         {
 826             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
 827             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0 > "/dev/stderr"
 828             print; fflush()
 829         }
 830     ' "$@"
 831 }
 832 
 833 # line-buffer (standard) output, tries to run the command given that way
 834 flush() { stdbuf -oL "$@"; }
 835 
 836 # line-buffer (standard) output, tries to run the command given that way
 837 flushed() { stdbuf -oL "$@"; }
 838 
 839 # show all folders in a folder, digging recursively
 840 folders() {
 841     local arg
 842     for arg in "${@:-.}"; do
 843         if [ ! -d "${arg}" ]; then
 844             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 845             return 1
 846         fi
 847         stdbuf -oL find "${arg}" -type d | awk '!/^\.$/ { print; fflush() }'
 848     done
 849 }
 850 
 851 # start from the line number given, skipping all previous ones
 852 fromline() { tail -n +"${1:-1}" "${2:--}"; }
 853 
 854 # convert FeeT into meters
 855 ft() {
 856     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 857         awk '/./ { printf "%.2f\n", 0.3048 * $0; fflush() }'
 858 }
 859 
 860 # convert FeeT² (squared) into meters²
 861 ft2() {
 862     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 863         awk '/./ { printf "%.2f\n", 0.09290304 * $0 }'
 864 }
 865 
 866 # Get/fetch data from the filenames/URIs given; uses my script `get`
 867 # alias g=get
 868 
 869 # run `grep` in extended-regex mode, enabling its full regex syntax
 870 # g() { grep -E --line-buffered "$@"; }
 871 
 872 # convert GALlons into liters
 873 gal() {
 874     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 875         awk '/./ { printf "%.2f\n", 3.785411784 * $0; fflush() }'
 876 }
 877 
 878 # convert binary GigaBytes into bytes
 879 gb() {
 880     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 881         awk '/./ { printf "%.4f\n", 1073741824 * $0; fflush() }' |
 882         sed 's-\.00*$--'
 883 }
 884 
 885 # Good, Bad, Meh colors lines using up to 3 regular expressions
 886 gbm() {
 887     local good="$1"
 888     local bad="$2"
 889     local meh="$3"
 890     [ $# -gt 0 ] && shift
 891     [ $# -gt 0 ] && shift
 892     [ $# -gt 0 ] && shift
 893 
 894     awk '
 895         BEGIN {
 896             gotgood = ARGC > 1 && ARGV[1] != ""
 897             gotbad = ARGC > 2 && ARGV[2] != ""
 898             gotmeh = ARGC > 3 && ARGV[3] != ""
 899             good = ARGV[1]
 900             bad = ARGV[2]
 901             meh = ARGV[3]
 902             delete ARGV[1]
 903             delete ARGV[2]
 904             delete ARGV[3]
 905         }
 906 
 907         gotgood && $0 ~ good {
 908             # code to use a color-blind-friendlier blue, instead of green
 909             # gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;0;95;215m")
 910             # printf "\x1b[38;2;0;95;215m%s\x1b[0m\n", $0
 911             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;0;135;95m")
 912             printf "\x1b[38;2;0;135;95m%s\x1b[0m\n", $0; fflush()
 913             next
 914         }
 915 
 916         gotbad && $0 ~ bad {
 917             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;204;0;0m")
 918             printf "\x1b[38;2;204;0;0m%s\x1b[0m\n", $0; fflush()
 919             next
 920         }
 921 
 922         gotmeh && $0 ~ meh {
 923             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m")
 924             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush()
 925             next
 926         }
 927 
 928         { print; fflush() }
 929     ' "${good}" "${bad}" "${meh}" "$@"
 930 }
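     # example (a sketch): color the output of a hypothetical test-runner,
     # green for passes, red for failures, gray for skips
     #     ./run-tests 2>&1 | gbm '^PASS' '^FAIL' '^SKIP'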
 931 
 932 # glue/stick together various lines only emitting a line-feed at the end
 933 # glue() { awk '{ printf "%s", $0; fflush() } END { if (NR > 0) print "" }'; }
 934 
 935 # glue/stick together various lines only emitting a line-feed at the end
 936 glue() {
 937     local sep="${1:-}"
 938     [ $# -gt 0 ] && shift
 939     awk -v sep="${sep}" '
 940         NR > 1 { printf "%s", sep }
 941         { printf "%s", $0; fflush() }
 942         END { if (NR > 0) print "" }
 943     ' "$@"
 944 }
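     # example (a sketch): join lines into a single comma-separated line
     #     seq 1 5 | glue ,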
 945 
 946 # GO Build Stripped: a common use-case for the go compiler
 947 gobs() { go build -ldflags "-s -w" -trimpath "$@"; }
 948 
 949 # GO DEPendencieS: show all dependencies in a go project
 950 godeps() { go list -f '{{ join .Deps "\n" }}' "$@"; }
 951 
 952 # GO IMPortS: show all imports in a go project
 953 goimps() { go list -f '{{ join .Imports "\n" }}' "$@"; }
 954 
 955 # go to the folder picked using an interactive TUI; uses my script `bf`
 956 goto() {
 957     local where
 958     where="$(bf "${1:-.}")"
 959     if [ $? -ne 0 ]; then
 960         return 0
 961     fi
 962 
 963     where="$(realpath "${where}")"
 964     if [ ! -d "${where}" ]; then
 965         where="$(dirname "${where}")"
 966     fi
 967     cd "${where}" || return
 968 }
 969 
 970 # GRayed-out lines with AWK
 971 grawk() {
 972     local cond="${1:-1}"
 973     [ $# -gt 0 ] && shift
 974     awk "${cond}"' {
 975             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m")
 976             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush()
 977             next
 978         }
 979         { print; fflush() }
 980     ' "$@"
 981 }
 982 
 983 # Style lines using a GRAY-colored BACKground
 984 grayback() {
 985     awk '
 986         {
 987             gsub(/\x1b\[0m/, "\x1b[0m\x1b[48;2;218;218;218m")
 988             printf "\x1b[48;2;218;218;218m%s\x1b[0m\n", $0; fflush()
 989         }
 990     ' "$@"
 991 }
 992 
 993 # Grep, Recursive Interactive and Plain
 994 # grip() { ugrep -r -Q --color=never -E "$@"; }
 995 
 996 # Global extended regex SUBstitute, using the AWK function of the same name:
 997 # arguments are used as regex/replacement pairs, in that order
 998 gsub() {
 999     awk '
1000         BEGIN {
1001             for (i = 1; i < ARGC; i++) {
1002                 args[++n] = ARGV[i]
1003                 delete ARGV[i]
1004             }
1005         }
1006         {
1007             for (i = 1; i <= n; i += 2) gsub(args[i], args[i + 1])
1008             print; fflush()
1009         }
1010     ' "$@"
1011 }
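     # example (a sketch): replace every digit-run with the letter N; the
     # arguments come in regex/replacement pairs
     #     echo 'room 101, floor 9' | gsub '[0-9]+' 'N'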
1012 
1013 # Highlight (lines) with AWK
1014 hawk() {
1015     local cond="${1:-1}"
1016     [ $# -gt 0 ] && shift
1017     awk '
1018         { low = lower = tolower($0) }
1019         '"${cond}"' {
1020             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1021             printf "\x1b[7m%s\x1b[0m\n", $0
1022             fflush()
1023             next
1024         }
1025         { print; fflush() }
1026     ' "$@"
1027 }
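     # example (a sketch): highlight lines mentioning `warning` regardless of
     # their letter-casing; `app.log` is just a hypothetical filename
     #     hawk 'lower ~ /warning/' app.log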
1028 
1029 # play a heartbeat-like sound lasting the number of seconds given, or for 1
1030 # second by default; uses my script `waveout`
1031 heartbeat() {
1032     local a='sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1])'
1033     local b='((12, u), (8, (u-0.25)%1))'
1034     local f="sum($a for v in $b) / 2"
1035     # local f='sum(sin(10*tau*exp(-20*v))*exp(-2*v) for v in (u, (u-0.25)%1))/2'
1036     # local f='sum(sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1]) for v in ((12, u), (8, (u-0.25)%1)))/2'
1037     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1038 }
1039 
1040 # Highlighted-style ECHO
1041 hecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1042 
1043 # show each byte as a pair of HEXadecimal (base-16) symbols
1044 hexify() {
1045     cat "$@" | od -x -A n |
1046         awk '{ gsub(/ +/, ""); printf "%s", $0; fflush() } END { printf "\n" }'
1047 }
1048 
1049 # HIghlighted-style ECHO
1050 hiecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1051 
1052 # highlight lines
1053 highlight() {
1054     awk '
1055         {
1056             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1057             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1058         }
1059     ' "$@"
1060 }
1061 
1062 # HIghlight LEAK emits/tees input both to stdout and stderr, highlighting what
1063 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
1064 # involving several steps
1065 hileak() {
1066     awk '
1067         {
1068             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
1069             printf "\x1b[7m%s\x1b[0m\n", $0 > "/dev/stderr"
1070             print; fflush()
1071         }
1072     ' "$@"
1073 }
1074 
1075 # highlight lines
1076 hilite() {
1077     awk '
1078         {
1079             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1080             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1081         }
1082     ' "$@"
1083 }
1084 
1085 # Help Me Remember my custom shell commands
1086 hmr() {
1087     local cmd="bat"
1088     # debian linux uses a different name for the `bat` app
1089     if [ -e "/usr/bin/batcat" ]; then
1090         cmd="batcat"
1091     fi
1092 
1093     "$cmd" \
1094         --style=plain,header,numbers --theme='Monokai Extended Light' \
1095         --wrap=never --color=always "$(which clam)" |
1096             sed -u 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' | less -JMKiCRS
1097 }
1098 
1099 # convert seconds into a colon-separated Hours-Minutes-Seconds triple
1100 hms() {
1101     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | awk '/./ {
1102         x = $0
1103         h = (x - x % 3600) / 3600
1104         m = (x % 3600) / 60
1105         s = x % 60
1106         printf "%02d:%02d:%05.2f\n", h, m, s; fflush()
1107     }'
1108 }
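     # example (a sketch): 3661.5 seconds are 1 hour, 1 minute, 1.5 seconds
     #     hms 3661.5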
1109 
1110 # Index all lines starting from 0, using a tab right after each line number
1111 i() {
1112     local start="${1:-0}"
1113     [ $# -gt 0 ] && shift
1114     nl -b a -w 1 -v "${start}" "$@"
1115 }
1116 
1117 # avoid/ignore lines which case-insensitively match any of the regexes given
1118 # iavoid() {
1119 #     gawk -v IGNORECASE=1 '
1120 #         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
1121 #         { for (i in e) if ($0 ~ e[i]) next; print; fflush() }
1122 #     ' "${@:-^\r?$}"
1123 # }
1124 
1125 # avoid/ignore lines which case-insensitively match any of the regexes given
1126 iavoid() {
1127     awk '
1128         BEGIN { fast = IGNORECASE != ""; IGNORECASE = 1 }
1129         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
1130         fast { for (i in e) if ($0 ~ e[i]) next; print; fflush(); next }
1131         { l = tolower($0); for (i in e) if (l ~ e[i]) next; print; fflush() }
1132     ' "${@:-^\r?$}"
1133 }
1134 
1135 # case-Insensitively DEDUPlicate prevents lines from appearing more than once
1136 idedup() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1137 
1138 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1139 # idrop() {
1140 #     gawk -v IGNORECASE=1 '
1141 #         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
1142 #         { for (i = 1; i < ARGC; i++) gsub(e[i], ""); print; fflush() }
1143 #     ' "${@:-\r$}"
1144 # }
1145 
1146 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1147 idrop() {
1148     awk '
1149         BEGIN { fast = IGNORECASE != ""; IGNORECASE = 1 }
1150         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
1151         fast { for (i = 1; i < ARGC; i++) gsub(e[i], "") }
1152         !fast { l = tolower($0); for (i = 1; i < ARGC; i++) gsub(e[i], "") }
1153         { print; fflush() }
1154     ' "${@:-\r$}"
1155 }
1156 
1157 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1158 # ierase() {
1159 #     gawk -v IGNORECASE=1 '
1160 #         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
1161 #         { for (i = 1; i < ARGC; i++) gsub(e[i], ""); print; fflush() }
1162 #     ' "${@:-\r$}"
1163 # }
1164 
1165 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1166 ierase() {
1167     awk '
1168         BEGIN { fast = IGNORECASE != ""; IGNORECASE = 1 }
1169         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
1170         fast { for (i = 1; i < ARGC; i++) gsub(e[i], "") }
1171         !fast { l = tolower($0); for (i = 1; i < ARGC; i++) gsub(e[i], "") }
1172         { print; fflush() }
1173     ' "${@:-\r$}"
1174 }
1175 
1176 # ignore a command in a pipe: this allows quick re-editing of pipes, while
1177 # still leaving previously-used steps in place as a memo
1178 ignore() { cat; }
1179 
1180 # only keep lines which case-insensitively match any of the regexes given
1181 # imatch() {
1182 #     gawk -v IGNORECASE=1 '
1183 #         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
1184 #         {for (i in e) if ($0 ~ e[i]) { print; fflush(); next } }
1185 #     ' "${@:-[^\r]}"
1186 # }
1187 
1188 # only keep lines which case-insensitively match any of the regexes given
1189 imatch() {
1190     awk '
1191         BEGIN { fast = IGNORECASE != ""; IGNORECASE = 1 }
1192         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
1193         fast { for (i in e) if ($0 ~ e[i]) { print; fflush(); next }; next }
1194         { l = tolower($0); for (i in e) if (l ~ e[i]) { print; fflush(); next } }
1195     ' "${@:-[^\r]}"
1196 }
1197 
1198 # emit each word-like item from each input line on its own line
1199 items() { awk '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"; }
1200 
1201 # case-insensitively deduplicate lines, keeping them in their original order:
1202 # the checking/matching is case-insensitive, but each first match is output
1203 # exactly as is
1204 iunique() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1205 
1206 # shrink/compact Json data, allowing an optional filepath
1207 # j0() { python -m json.tool --compact "${1:--}"; }
1208 
1209 # shrink/compact Json using the `jq` app, allowing an optional filepath, and
1210 # even an optional transformation formula after that
1211 # j0() { jq -c -M "${2:-.}" "${1:--}"; }
1212 
1213 # show Json data on multiple lines, using 2 spaces for each indentation level,
1214 # allowing an optional filepath
1215 # j2() { python -m json.tool --indent 2 "${1:--}"; }
1216 
1217 # show Json data on multiple lines, using 2 spaces for each indentation level,
1218 # allowing an optional filepath, and even an optional transformation formula
1219 # after that
1220 # j2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1221 
1222 # listen to streaming JAZZ music
1223 jazz() {
1224     printf "streaming \e[7mSmooth Jazz Instrumental\e[0m\n"
1225     # mpv https://stream.zeno.fm/00rt0rdm7k8uv
1226     mpv --quiet https://stream.zeno.fm/00rt0rdm7k8uv
1227 }
1228 
1229 # show a `dad` JOKE from the web, sometimes even a very funny one
1230 joke() {
1231     curl -s https://icanhazdadjoke.com | fold -s | sed -u -E 's- *\r?$--'
1232     # plain-text output from previous cmd doesn't end with a line-feed
1233     printf "\n"
1234 }
1235 
1236 # shrink/compact JSON data, allowing an optional filepath
1237 # json0() { python -m json.tool --compact "${1:--}"; }
1238 
1239 # shrink/compact JSON using the `jq` app, allowing an optional filepath, and
1240 # even an optional transformation formula after that
1241 json0() { jq -c -M "${2:-.}" "${1:--}"; }
1242 
1243 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1244 # allowing an optional filepath
1245 # json2() { python -m json.tool --indent 2 "${1:--}"; }
1246 
1247 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1248 # allowing an optional filepath, and even an optional transformation formula
1249 # after that
1250 json2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1251 
1252 # turn JSON Lines into a proper JSON array
1253 jsonl2json() { jq -s -M "${@:-.}"; }
1254 
1255 # emit the given number of random/junk bytes, or 1024 junk bytes by default
1256 junk() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" /dev/urandom; }
1257 
1258 # convert binary KiloBytes into bytes
1259 kb() {
1260     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1261         awk '/./ { printf "%.2f\n", 1024 * $0; fflush() }' |
1262         sed 's-\.00*$--'
1263 }
1264 
1265 # run `less`, showing line numbers, among other settings
1266 l() { less -JMKNiCRS "$@"; }
1267 
1268 # Like A Book groups lines as 2 side-by-side pages, the same way books
1269 # do it; uses my script `book`
1270 lab() { book "$(($(tput lines) - 1))" "$@" | less -JMKiCRS; }
1271 
1272 # find the LAN (local-area network) IP address for this device
1273 lanip() { hostname -I; }
1274 
1275 # Line xARGS: `xargs` using line separators, which handles filepaths
1276 # with spaces, as long as the standard input has 1 path per line
1277 largs() { xargs -d '\n' "$@"; }
1278 
1279 # get the last n lines, or 1 by default
1280 # last() { tail -n "${1:-1}" "${2:--}"; }
1281 
1282 # get up to the last given number of bytes
1283 lastbytes() { tail -c "${1:-1}" "${2:--}"; }
1284 
1285 # get the last n lines, or 1 by default
1286 lastlines() { tail -n "${1:-1}" "${2:--}"; }
1287 
1288 # turn UTF-8 into its latin-like subset, where variants of latin letters stay
1289 # as given, and where all other symbols become question marks, one question
1290 # mark for each code-point byte
1291 latinize() {
1292     iconv -f utf-8 -t latin-1//translit "$@" | iconv -f latin-1 -t utf-8
1293 }
1294 
1295 # Lowercased (lines) AWK
1296 lawk() {
1297     local code="${1:-1}"
1298     [ $# -gt 0 ] && shift
1299     awk "
1300         {
1301             line = orig = original = \$0
1302             low = lower = tolower(\$0)
1303             \$0 = lower
1304         }
1305         ${code}
1306         { fflush() }
1307     " "$@";
1308 }
1309 
1310 # convert pounds (LB) into kilograms
1311 lb() {
1312     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1313         awk '/./ { printf "%.2f\n", 0.45359237 * $0; fflush() }'
1314 }
1315 
1316 # Line-Buffer (standard) Output, tries to run the command given that way
1317 lbo() { stdbuf -oL "$@"; }
1318 
1319 # run `less`, showing line numbers, among other settings
1320 least() { less -JMKNiCRS "$@"; }
1321 
1322 # limit stops at the first n bytes, or 1024 bytes by default
1323 limit() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" "${2:--}"; }
1324 
1325 # Less with Header runs `less` with line numbers, ANSI styles, no line-wraps,
1326 # and using the first n lines as a sticky-header (1 by default), so they
1327 # always show on top
1328 lh() {
1329     local n="${1:-1}"
1330     [ $# -gt 0 ] && shift
1331     less --header="$n" -JMKNiCRS "$@"
1332 }
1333 
1334 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
1335 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
1336 # and ensuring each input's last line ends with a line-feed
1337 lines() {
1338     awk '
1339         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1340         { gsub(/\r$/, ""); print; fflush() }
1341     ' "$@"
1342 }
1343 
1344 # regroup adjacent lines into n-item tab-separated lines
1345 lineup() {
1346     local n="${1:-0}"
1347     [ $# -gt 0 ] && shift
1348 
1349     if [ "$n" -le 0 ]; then
1350         awk '
1351             NR > 1 { printf "\t" }
1352             { printf "%s", $0; fflush() }
1353             END { if (NR > 0) print "" }
1354         ' "$@"
1355         return $?
1356     fi
1357 
1358     awk -v n="$n" '
1359         NR % n != 1 && n > 1 { printf "\t" }
1360         { printf "%s", $0; fflush() }
1361         NR % n == 0 { print ""; fflush() }
1362         END { if (NR % n != 0) print "" }
1363     ' "$@"
1364 }
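     # example (a sketch): regroup 12 numbers into 3-item tab-separated lines
     #     seq 1 12 | lineup 3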
1365 
1366 # find all hyperLINKS (https:// and http://) in the input text
1367 # links() {
1368 #     awk '
1369 #         BEGIN { e = "https?://[A-Za-z0-9+_.:%-]+(/[A-Za-z0-9+_.%/,#?&=-]*)*" }
1370 #         {
1371 #             # ignore notifications (code 9) and hyperlinks (code 8)
1372 #             gsub(/\x1b\](8|9);[^\x07]*\x07/, "")
1373 #             # ignore cursor-movers and style-changers
1374 #             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
1375 #             # match all links in the current line
1376 #             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1377 #                 print substr(s, RSTART, RLENGTH); fflush()
1378 #             }
1379 #         }
1380 #     ' "$@"
1381 # }
1382 
1383 # List files, using the `Long` option
1384 # ll() { ls -l "$@"; }
1385 
1386 # LOAD data from the filename or URI given; uses my script `get`
1387 load() { get "$@"; }
1388 
1389 # LOwercase line, check (awk) COndition: on each success, the original line
1390 # is output with its original letter-casing, as its lower-cased version is
1391 # only a convenience meant for the condition
1392 loco() {
1393     local cond="${1:-1}"
1394     [ $# -gt 0 ] && shift
1395     awk "
1396         {
1397             line = orig = original = \$0
1398             low = lower = tolower(\$0)
1399             \$0 = lower
1400         }
1401         ${cond} { print line; fflush() }
1402     " "$@"
1403 }
1404 
1405 # LOcal SERver webserves files in a folder as localhost, using the port
1406 # number given, or port 8080 by default
1407 loser() {
1408     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
1409     python3 -m http.server "${1:-8080}" -d "${2:-.}"
1410 }
1411 
1412 # LOWERcase all ASCII symbols
1413 lower() { awk '{ print tolower($0); fflush() }' "$@"; }
1414 
1415 # Live/Line-buffered RipGrep ensures results show/pipe up immediately
1416 lrg() { rg --line-buffered "$@"; }
1417 
1418 # Listen To Youtube
1419 lty() {
1420     local url
1421     # some youtube URIs end with extra playlist/tracker parameters
1422     url="$(echo "$1" | sed 's-&.*--')"
1423     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
1424 }
1425 
1426 # Match lines with any of the regexes given
1427 alias m=match
1428 
1429 # only keep lines which match any of the regexes given
1430 match() {
1431     awk '
1432         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
1433         { for (i in e) if ($0 ~ e[i]) { print; fflush(); next } }
1434     ' "${@:-[^\r]}"
1435 }
1436 
1437 # MAX Width truncates lines up to the given number of items/bytes given, or up
1438 # to 80 by default; output lines end with an ANSI reset-code, in case input
1439 # lines use ANSI styles
1440 maxw() {
1441     local maxwidth="${1:-80}"
1442     [ $# -gt 0 ] && shift
1443     awk -v maxw="${maxwidth}" '
1444         {
1445             gsub(/\r$/, "")
1446             printf("%s\x1b[0m\n", substr($0, 1, maxw)); fflush()
1447         }
1448     ' "$@"
1449 }
1450 
1451 # convert binary MegaBytes into bytes
1452 mb() {
1453     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1454         awk '/./ { printf "%.2f\n", 1048576 * $0; fflush() }' |
1455         sed 's-\.00*$--'
1456 }
1457 
1458 # Multi-Core MAKE runs `make` using all cores
1459 mcmake() { make -j "$(nproc)" "$@"; }
1460 
1461 # Multi-Core MaKe runs `make` using all cores
1462 mcmk() { make -j "$(nproc)" "$@"; }
1463 
1464 # merge stderr into stdout, without any ugly keyboard-dancing
1465 # merrge() { "$@" 2>&1; }
1466 
1467 # convert MIles into kilometers
1468 mi() {
1469     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1470         awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }'
1471 }
1472 
1473 # convert MIles² (squared) into kilometers²
1474 mi2() {
1475     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1476         awk '/./ { printf "%.2f\n", 2.5899881103360 * $0 }'
1477 }
1478 
1479 # simulate a simple no-options subset of the `finger` command
1480 # middle() {
1481 #     awk '{ printf "%s\r\n", $0; fflush() }' |
1482 #         curl --show-error -s "telnet://$1:${2:-79}"
1483 # }
1484 
1485 # simulate a simple no-options subset of the `finger` command
1486 # midfinger() {
1487 #     awk '{ printf "%s\r\n", $0; fflush() }' |
1488 #         curl --show-error -s "telnet://$1:${2:-79}"
1489 # }
1490 
1491 # Make In Folder
1492 # mif() {
1493 #     local code
1494 #     pushd "${1:-.}" > /dev/null || return
1495 #     [ $# -gt 0 ] && shift
1496 #     make "$@"
1497 #     code=$?
1498 #     popd > /dev/null || return "${code}"
1499 #     return "${code}"
1500 # }
1501 
1502 # Make In Folder
1503 mif() {
1504     local code
1505     pushd "${1:-.}" > /dev/null || return
1506     [ $# -gt 0 ] && shift
1507     make "${@:--j $(nproc)}"
1508     code=$?
1509     popd > /dev/null || return "${code}"
1510     return "${code}"
1511 }
1512 
1513 # Media INFO
1514 # minfo() { mediainfo "$@" | less -JMKiCRS; }
1515 
1516 # Media INFO
1517 # minfo() { ffprobe "$@" |& less -JMKiCRS; }
1518 
1519 # quick alias for `make`
1520 # alias mk=make
1521 
1522 # run `make`
1523 # mk() { make "${@:--j $(nproc)}"; }
1524 
1525 # run `make`
1526 mk() { make "$@"; }
1527 
1528 # convert Miles Per Hour into kilometers per hour
1529 mph() {
1530     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1531         awk '/./ { printf "%.2f\n", 1.609344 * $0 }'
1532 }
1533 
1534 # Number all lines, using a tab right after each line number
1535 n() {
1536     local start="${1:-1}"
1537     [ $# -gt 0 ] && shift
1538     nl -b a -w 1 -v "${start}" "$@"
1539 }
1540 
1541 # Not AND sorts its 2 inputs, then finds lines not in common
1542 nand() {
1543     # comm -3 <(sort "$1") <(sort "$2")
1544     # dash doesn't support the process-sub syntax
1545     (sort "$1" | (sort "$2" | (comm -3 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
1546 }
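     # example (a sketch): show lines not shared between 2 hypothetical files
     #     nand old-list.txt new-list.txt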
1547 
1548 # Nice Byte Count, using my scripts `nn` and `cext`
1549 nbc() { wc -c "$@" | nn --gray | cext; }
1550 
1551 # Nice File Sizes, using my scripts `nn` and `cext`
1552 nfs() {
1553     # turn arg-list into single-item lines
1554     printf "%s\n" "$@" |
1555     # calculate file-sizes, and reverse-sort results
1556     xargs -d '\n' wc -c | sort -rn |
1557     # add/realign fields to improve legibility
1558     awk '
1559         # start output with a header-like line, and add a MiB field
1560         BEGIN { printf "%6s  %10s  %8s  name\n", "n", "bytes", "MiB"; fflush() }
1561         # make table breathe with empty lines, so tall outputs are readable
1562         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1563         # emit regular output lines
1564         {
1565             printf "%6d  %10d  %8.2f  ", NR - 1, $1, $1 / 1048576
1566             # first field is likely space-padded
1567             gsub(/^ */, "")
1568             # slice line after the first field, as filepaths can have spaces
1569             $0 = substr($0, length($1) + 1)
1570             # first field is likely space-padded
1571             gsub(/^ /, "")
1572             printf "%s\n", $0; fflush()
1573         }
1574     ' |
1575     # make zeros in the MiB field stand out with a special color
1576     awk '
1577         {
1578             gsub(/ 00*\.00* /, "\x1b[38;2;135;135;175m&\x1b[0m")
1579             print; fflush()
1580         }
1581     ' |
1582     # make numbers nice, alternating styles along 3-digit groups
1583     nn --gray |
1584     # color-code file extensions
1585     cext |
1586     # make result interactively browsable
1587     less -JMKiCRS
1588 }
1589 
1590 # NIce(r) COlumns makes the output of commands which start with a header
1591 # line easier to read; uses my script `nn`
1592 nico() {
1593     awk '
1594         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1595         { printf "%5d  %s\n", NR - 1, $0; fflush() }
1596     ' "$@" | nn --gray | less -JMKiCRS
1597 }
1598 
1599 # emit nothing to output and/or discard everything from input
1600 nil() {
1601     if [ $# -gt 0 ]; then
1602         "$@" > /dev/null
1603     else
1604         cat < /dev/null
1605     fi
1606 }
1607 
1608 # pipe-run my scripts `nj` (Nice Json) and `nn` (Nice Numbers)
1609 njnn() { nj "$@" | nn --gray; }
1610 
1611 # convert Nautical MIles into kilometers
1612 nmi() {
1613     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1614         awk '/./ { printf "%.2f\n", 1.852 * $0; fflush() }'
1615 }
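
     # a worked example for `nmi`, left as a comment: 10 * 1.852 = 18.52
     #     nmi 10    # emits 18.52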
1616 
1617 # NO (standard) ERRor ignores stderr, without any ugly keyboard-dancing
1618 # noerr() { "$@" 2> /dev/null; }
1619 
1620 # play a white-noise sound lasting the number of seconds given, or for 1
1621 # second by default; uses my script `waveout`
1622 noise() { waveout "${1:-1}" "${2:-0.05} * random()" | mpv --really-quiet -; }
1623 
1624 # show the current date and time
1625 now() { date +'%Y-%m-%d %H:%M:%S'; }
1626 
1627 # Nice Processes shows/lists all current processes; uses my script `nn`
1628 np() {
1629     local res
1630     local code
1631     # res="$(ps "${@:-auxf}")"
1632     res="$(ps "${@:-aux}")"
1633     code=$?
1634     if [ "${code}" -ne 0 ]; then
1635         return "${code}"
1636     fi
1637 
1638     echo "${res}" | awk '
1639         BEGIN {
1640             d = strftime("%a %b %d")
1641             t = strftime("%H:%M:%S")
1642             # printf "%s  %s\n\n", d, t
1643             # printf "\x1b[32m%s\x1b[0m  \x1b[34m%s\x1b[0m\n\n", d, t
1644             # printf "%30s\x1b[32m%s\x1b[0m  \x1b[34m%s\x1b[0m\n\n", "", d, t
1645             # printf "%30s%s  %s\n\n", "", d, t
1646             printf "\x1b[7m%30s%s  %s%30s\x1b[0m\n\n", "", d, t, ""
1647         }
1648 
1649         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1650 
1651         $1 == "root" {
1652             # gsub(/^/, "\x1b[36m")
1653             # gsub(/\x1b\[0m/, "\x1b[0m\x1b[36m")
1654             gsub(/^/, "\x1b[34m")
1655             gsub(/ +/, "&\x1b[0m\x1b[34m")
1656             gsub(/$/, "\x1b[0m")
1657         }
1658 
1659         {
1660             gsub(/ \? /, "\x1b[38;2;135;135;175m&\x1b[0m")
1661             gsub(/0[:\.]00*/, "\x1b[38;2;135;135;175m&\x1b[0m")
1662             printf "%3d  %s\n", NR - 1, $0
1663         }
1664     ' | nn --gray | less -JMKiCRS
1665 }
1666 
1667 # Nice Size, using my scripts `nn` and `cext`
1668 ns() { wc -c "$@" | nn --gray | cext; }
1669 
1670 # Nice Transform Json, using my scripts `tj` and `nj`
1671 ntj() { tj "$@" | nj; }
1672 
1673 # Nice TimeStamp
1674 nts() {
1675     ts '%Y-%m-%d %H:%M:%S' |
1676         sed -u 's-^-\x1b[48;2;218;218;218m\x1b[38;2;0;95;153m-; s- -\x1b[0m\t-2'
1677 }
1678 
1679 # emit nothing to output and/or discard everything from input
1680 null() {
1681     if [ $# -gt 0 ]; then
1682         "$@" > /dev/null
1683     else
1684         cat < /dev/null
1685     fi
1686 }
1687 
1688 # (Nice) What Are These (?) shows what the names given to it are/do, coloring
1689 # the syntax of shell functions
1690 nwat() {
1691     local a
1692 
1693     if [ $# -eq 0 ]; then
1694         printf "\e[38;2;204;0;0mnwat: no names given\e[0m\n" > /dev/stderr
1695         return 1
1696     fi
1697 
1698     local cmd="bat"
1699     # debian linux uses a different name for the `bat` app
1700     if [ -e "/usr/bin/batcat" ]; then
1701         cmd="batcat"
1702     fi
1703 
1704     for a in "$@"; do
1705         # printf "\e[7m%-80s\e[0m\n" "$a"
1706         printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
1707 
1708         # resolve 1 alias level
1709         if alias "$a" 2> /dev/null > /dev/null; then
1710             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
1711         fi
1712 
1713         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
1714             # resolved aliases with args/spaces in them would otherwise fail
1715             echo "$a"
1716         elif whence -f "$a" > /dev/null 2> /dev/null; then
1717             # zsh seems to show a shell function's code only via `whence -f`
1718             whence -f "$a"
1719         elif type "$a" > /dev/null 2> /dev/null; then
1720             # dash doesn't support `declare`, and `type` in bash emits
1721             # a redundant first output line, when it's a shell function
1722             type "$a" | awk '
1723                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
1724                 { print; fflush() }
1725                 END { if (NR < 2 && skipped) print skipped }
1726             ' | "$cmd" -l sh --style=plain --theme='Monokai Extended Light' \
1727                 --wrap=never --color=always |
1728                     sed -u 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g'
1729         else
1730             printf "\e[38;2;204;0;0m%s not found\e[0m\n" "$a"
1731         fi
1732     done | less -JMKiCRS
1733 }
1734 
1735 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1736 # alternating styles to make long numbers easier to read
1737 # nwc() { wc "$@" | nn --gray; }
1738 
1739 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1740 # alternating styles to make long numbers easier to read
1741 # nwc() { wc "$@" | nn --gray | awk '{ printf "%5d %s\n", NR, $0; fflush() }'; }
1742 
1743 # Nice Word-Count runs `wc` and colors results, using my scripts `nn` and
1744 # `cext`, alternating styles to make long numbers easier to read
1745 nwc() {
1746     wc "$@" | sort -rn | nn --gray | cext |
1747         awk '{ printf "%5d %s\n", NR - 1, $0; fflush() }'
1748 }
1749 
1750 # Nice Zoom Json, using my scripts `zj` and `nj`
1751 nzj() { zj "$@" | nj; }
1752 
1753 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1754 # pawk() { awk -F='' -v RS='' "$@"; }
1755 
1756 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1757 pawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
1758 
1759 # Plain `fd`
1760 pfd() { fd --color=never "$@"; }
1761 
1762 # pick lines, using all the 1-based line-numbers given
1763 picklines() {
1764     awk '
1765         BEGIN { m = ARGC - 1; if (ARGC == 1) exit 0 }
1766         BEGIN { for (i = 1; i <= m; i++) { p[i] = ARGV[i]; delete ARGV[i] } }
1767         { l[++n] = $0 }
1768         END {
1769             for (i = 1; i <= m; i++) {
1770                 j = p[i]
1771                 if (j < 0) j += NR + 1
1772                 if (0 < j && j <= NR) print l[j]
1773             }
1774         }
1775     ' "$@"
1776 }
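
     # usage sketches for `picklines`, as comments: all arguments are 1-based
     # line-numbers, negative ones count from the end, and input comes from stdin
     #     seq 10 | picklines 2 -1 5    # emits 2, 10, then 5
     #     picklines 3 1 < notes.txt    # the filename is just an example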
1777 
1778 # Plain Interactive Grep
1779 pig() { ugrep --color=never -Q -E "$@"; }
1780 
1781 # make text plain, by ignoring ANSI terminal styling
1782 plain() {
1783     awk '
1784         {
1785             # ignore notifications (code 9) and hyperlinks (code 8)
1786             gsub(/\x1b\](8|9);[^\x07]*\x07/, "")
1787             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ignore all ANSI style-changers
1788             print; fflush()
1789         }
1790     ' "$@"
1791 }
1792 
1793 # end all lines with an ANSI-code to reset styles
1794 plainend() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1795 
1796 # end all lines with an ANSI-code to reset styles
1797 plainends() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1798 
1799 # play audio/video media
1800 play() { mpv "${@:--}"; }
1801 
1802 # Pick LINE, using the 1-based line-number given
1803 pline() {
1804     local line="$1"
1805     [ $# -gt 0 ] && shift
1806     awk -v n="${line}" '
1807         BEGIN { if (n < 1) exit 0 }
1808         NR == n { print; exit 0 }
1809     ' "$@"
1810 }
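
     # usage sketches for `pline`, as comments; the filename is just an example
     #     seq 10 | pline 7     # emits 7
     #     pline 3 notes.txt    # emits line 3 of notes.txt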
1811 
1812 # Paused MPV; especially useful when trying to view pictures via `mpv`
1813 pmpv() { mpv --pause "${@:--}"; }
1814 
1815 # Print Python result
1816 pp() { python -c "print($1)"; }
1817 
1818 # PRecede (input) ECHO, prepends a first line to stdin lines
1819 precho() { echo "$@" && cat /dev/stdin; }
1820 
1821 # PREcede (input) MEMO, prepends a first highlighted line to stdin lines
1822 prememo() {
1823     awk '
1824         BEGIN {
1825             if (ARGC > 1) printf "\x1b[7m"
1826             for (i = 1; i < ARGC; i++) {
1827                 if (i > 1) printf " "
1828                 printf "%s", ARGV[i]
1829                 delete ARGV[i]
1830             }
1831             if (ARGC > 1) printf "\x1b[0m\n"
1832             fflush()
1833         }
1834         { print; fflush() }
1835     ' "$@"
1836 }
1837 
1838 # PREcede (input) TSV emits all arguments given as a single tab-separated
1839 # line of output, followed by all lines from stdin verbatim
1840 pretsv() {
1841     awk '
1842         BEGIN {
1843             for (i = 1; i < ARGC; i++) {
1844                 if (i > 1) printf "\t"
1845                 printf "%s", ARGV[i]
1846                 delete ARGV[i]
1847             }
1848             if (ARGC > 1) printf "\n"
1849             fflush()
1850         }
1851         { print; fflush() }
1852     ' "$@"
1853 }
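
     # a hypothetical usage for `pretsv`, kept as a comment: it can prepend a
     # header line to header-less TSV data
     #     printf "alice\t30\nbob\t25\n" | pretsv name age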
1854 
1855 # Plain Recursive Interactive Grep
1856 prig() { ugrep --color=never -r -Q -E "$@"; }
1857 
1858 # show/list all current processes
1859 processes() {
1860     local res
1861     res="$(ps aux)"
1862     echo "${res}" | awk 1 | sed -E -u \
1863         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' \
1864         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1'
1865 }
1866 
1867 # Play Youtube Audio
1868 pya() {
1869     local url
1870     # some youtube URIs end with extra playlist/tracker parameters
1871     url="$(echo "$1" | sed 's-&.*--')"
1872     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
1873 }
1874 
1875 # Quiet ignores stderr, without any ugly keyboard-dancing
1876 q() { "$@" 2> /dev/null; }
1877 
1878 # Quiet MPV
1879 qmpv() { mpv --quiet "${@:--}"; }
1880 
1881 # ignore stderr, without any ugly keyboard-dancing
1882 quiet() { "$@" 2> /dev/null; }
1883 
1884 # Reset the screen, which empties it and resets the current style
1885 alias r=reset
1886 
1887 # keep only lines between the 2 line numbers given, inclusively
1888 rangelines() {
1889     { [ "$#" -eq 2 ] || [ "$#" -eq 3 ]; } && [ "${1}" -le "${2}" ] &&
1890         { tail -n +"${1:-1}" "${3:--}" | head -n "$(("${2}" - "${1}" + 1))"; }
1891 }
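
     # usage sketches for `rangelines`, as comments: both line-numbers are
     # required, and the optional third argument names an input file
     #     seq 10 | rangelines 4 6       # emits 4, 5, 6
     #     rangelines 10 20 notes.txt    # the filename is just an example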
1892 
1893 # RANdom MANual page
1894 ranman() {
1895     find "/usr/share/man/man${1:-1}" -type f | shuf -n 1 | xargs basename |
1896         sed 's-\.gz$--' | xargs man
1897 }
1898 
1899 # Run AWK expression
1900 rawk() {
1901     local expr="${1:-0}"
1902     [ $# -gt 0 ] && shift
1903     awk "BEGIN { print ${expr}; exit }" "$@"
1904 }
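
     # worked examples for `rawk`, left as comments
     #     rawk '2 ^ 10'     # emits 1024
     #     rawk 'sqrt(2)'    # emits 1.41421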
1905 
1906 # play a ready-phone-line sound lasting the number of seconds given, or for 1
1907 # second by default; uses my script `waveout`
1908 ready() {
1909     local f='0.5 * sin(350*tau*t) + 0.5 * sin(450*tau*t)'
1910     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1911 }
1912 
1913 # reflow/trim lines of prose (text) to improve its legibility: it's especially
1914 # useful when the text is pasted from web-pages being viewed in reader mode
1915 reprose() {
1916     local w="${1:-80}"
1917     [ $# -gt 0 ] && shift
1918     awk 'FNR == 1 && NR > 1 { print "" } { print; fflush() }' "$@" |
1919         fold -s -w "$w" | sed -u -E 's- *\r?$--'
1920 }
1921 
1922 # ignore ansi styles from stdin and restyle things using the style-name given;
1923 # uses my script `style`
1924 restyle() { style "$@"; }
1925 
1926 # change the tab-title on your terminal app
1927 retitle() { printf "\e]0;%s\a\n" "$*"; }
1928 
1929 # REVerse-order SIZE (byte-count)
1930 revsize() { wc -c "$@" | sort -rn; }
1931 
1932 # Run In Folder
1933 rif() {
1934     local code
1935     pushd "${1:-.}" > /dev/null || return
1936     [ $# -gt 0 ] && shift
1937     "$@"
1938     code=$?
1939     popd > /dev/null || return "${code}"
1940     return "${code}"
1941 }
1942 
1943 # play a ringtone-style sound lasting the number of seconds given, or for 1
1944 # second by default; uses my script `waveout`
1945 ringtone() {
1946     local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))'
1947     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1948 }
1949 
1950 # Read-Only Micro (text editor)
1951 rom() { micro -readonly true "$@"; }
1952 
1953 # run the command given, trying to turn its output into TSV (tab-separated
1954 # values); uses my script `dejson`
1955 rtab() { jc "$@" | dejson; }
1956 
1957 # Right TRIM ignores trailing spaces, as well as trailing carriage returns
1958 rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
1959 
1960 # show a RULER-like width-measuring line
1961 ruler() {
1962     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \
1963         's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
1964 }
1965 
1966 # run the command given, trying to turn its output into TSV (tab-separated
1967 # values); uses my script `dejson`
1968 runtab() { jc "$@" | dejson; }
1969 
1970 # run the command given, trying to turn its output into TSV (tab-separated
1971 # values); uses my script `dejson`
1972 runtsv() { jc "$@" | dejson; }
1973 
1974 # Reverse-order WC
1975 rwc() { wc "$@" | sort -rn; }
1976 
1977 # extended-mode Sed, enabling its full regex syntax
1978 # s() { sed -E -u "$@"; }
1979 
1980 # Silent CURL spares you the progress bar, but still tells you about errors
1981 scurl() { curl --show-error -s "$@"; }
1982 
1983 # show a unique-looking SEParator line; useful to run between commands
1984 # which output walls of text
1985 sep() {
1986     [ "${1:-80}" -gt 0 ] &&
1987         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g'
1988 }
1989 
1990 # webSERVE files in a folder as localhost, using the port number given, or
1991 # port 8080 by default
1992 serve() {
1993     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
1994     python3 -m http.server "${1:-8080}" -d "${2:-.}"
1995 }
1996 
1997 # SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input
1998 setdiff() {
1999     # comm -23 <(sort "$1") <(sort "$2")
2000     # dash doesn't support the process-sub syntax
2001     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2002 }
2003 
2004 # SET INtersection, sorts its 2 inputs, then finds common lines
2005 setin() {
2006     # comm -12 <(sort "$1") <(sort "$2")
2007     # dash doesn't support the process-sub syntax
2008     (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2009 }
2010 
2011 # SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input
2012 setsub() {
2013     # comm -23 <(sort "$1") <(sort "$2")
2014     # dash doesn't support the process-sub syntax
2015     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2016 }
2017 
2018 # Show Files (and folders), coloring folders and links; uses my script `nn`
2019 sf() {
2020     ls -al --file-type --color=never --time-style iso "$@" | awk '
2021         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2022         {
2023             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2024             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2025             printf "%6d  %s\n", NR - 1, $0; fflush()
2026         }
2027     ' | nn --gray | less -JMKiCRS
2028 }
2029 
2030 # Show Files (and folders) Plus, by coloring folders, links, and extensions;
2031 # uses my scripts `nn` and `cext`
2032 sfp() {
2033     ls -al --file-type --color=never --time-style iso "$@" | awk '
2034         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2035         {
2036             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2037             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2038             printf "%6d  %s\n", NR - 1, $0; fflush()
2039         }
2040     ' | nn --gray | cext | less -JMKiCRS
2041 }
2042 
2043 # SHell-run AWK output
2044 # shawk() { stdbuf -oL awk "$@" | sh; }
2045 
2046 # time-run various tools given one-per-line from stdin, giving them extra
2047 # common arguments passed as explicit arguments
2048 # showdown() {
2049 #     awk '
2050 #         BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } }
2051 #         {
2052 #             printf "%s", $0
2053 #             for (i = 1; i < ARGC; i++) printf " %s", a[i]
2054 #             printf "\n"; fflush()
2055 #         }
2056 #     ' "$@" | xargs -d '\n' hyperfine
2057 # }
2058 
2059 # time-run various tools given one-per-line from stdin, giving them extra
2060 # common arguments passed as explicit arguments
2061 showdown() {
2062     awk '
2063         BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } }
2064         {
2065             printf "%s", $0
2066             for (i = 1; i < ARGC; i++) printf " %s", a[i]
2067             printf "\n"; fflush()
2068         }
2069     ' "$@" | xargs -d '\n' hyperfine --style full
2070 }
2071 
2072 # SHOW a command, then RUN it
2073 showrun() { printf "\e[7m%s\e[0m\n" "$*" && "$@"; }
2074 
2075 # SHell-QUOTE each line from the input(s): this is useful to make lines each
2076 # holding a single filepath compatible with `xargs`, since standard shell
2077 # settings get in the way of filepaths with spaces and other special symbols
2078 shquote() {
2079     awk '
2080         {
2081             s = $0
2082             gsub(/\r$/, "", s)
2083             gsub(/\\/, "\\\\", s)
2084             gsub(/"/, "\\\"", s)
2085             gsub(/\$/, "\\$", s)
2086             printf "\"%s\"\n", s; fflush()
2087         }
2088     ' "$@"
2089 }
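
     # usage sketches for `shquote`, as comments; the `find`-based pipe is just
     # an example of feeding quoted filepaths to `xargs`
     #     printf "a file.txt\n" | shquote    # emits "a file.txt", quotes kept
     #     find . -type f | shquote | xargs wc -c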
2090 
2091 # skip the first n lines, or the 1st line by default
2092 skip() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2093 
2094 # skip the first n bytes
2095 skipbytes() { tail -c +$(("$1" + 1)) "${2:--}"; }
2096 
2097 # skip the last n lines, or the last line by default
2098 skiplast() { head -n -"${1:-1}" "${2:--}"; }
2099 
2100 # skip the last n bytes
2101 skiplastbytes() { head -c -"$1" "${2:--}"; }
2102 
2103 # skip the last n lines, or the last line by default
2104 skiplastlines() { head -n -"${1:-1}" "${2:--}"; }
2105 
2106 # skip the first n lines, or the 1st line by default
2107 skiplines() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2108 
2109 # SLOW/delay lines from the standard-input, waiting the number of seconds
2110 # given for each line, or waiting 1 second by default
2111 slow() {
2112     local seconds="${1:-1}"
2113     (
2114         IFS="$(printf "\n")"
2115         while read -r line; do
2116             sleep "${seconds}"
2117             printf "%s\n" "${line}"
2118         done
2119     )
2120 }
2121 
2122 # Show Latest Podcasts, using my scripts `podfeed` and `si`
2123 slp() {
2124     local title
2125     title="Latest Podcast Episodes as of $(date +'%F %T')"
2126     podfeed -title "${title}" "$@" | si
2127 }
2128 
2129 # recursively find all files with fewer bytes than the number given
2130 smallfiles() {
2131     local n
2132     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
2133     [ $# -gt 0 ] && shift
2134 
2135     local arg
2136     for arg in "${@:-.}"; do
2137         if [ ! -d "${arg}" ]; then
2138             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2139             return 1
2140         fi
2141         stdbuf -oL find "${arg}" -type f -size -"$n"c
2142     done
2143 }
2144 
2145 # emit the first line as is, sorting all lines after that, using the
2146 # `sort` command, passing all/any arguments/options to it
2147 sortrest() {
2148     awk -v sort="sort $*" '
2149         { gsub(/\r$/, "") }
2150         NR == 1 { print; fflush() }
2151         NR > 1 { print | sort }
2152     '
2153 }
2154 
2155 # SORt Tab-Separated Values: emit the first line as is, sorting all lines after
2156 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2157 # all/any arguments/options to it
2158 sortsv() {
2159     awk -v sort="sort -t'$(printf '\t')' $*" '
2160         { gsub(/\r$/, "") }
2161         NR == 1 { print; fflush() }
2162         NR > 1 { print | sort }
2163     '
2164 }
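
     # a hypothetical usage for `sortsv`, kept as a comment: the header line
     # stays on top, while the rest is sorted by the options given to `sort`
     #     wctsv *.txt | sortsv -rnk2    # sort by the bytes column, descending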
2165 
2166 # emit a line with the number of spaces given in it
2167 spaces() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" ""; }
2168 
2169 # ignore leading spaces, trailing spaces, even runs of multiple spaces
2170 # in the middle of lines, as well as trailing carriage returns
2171 squeeze() {
2172     awk '
2173         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2174         {
2175             gsub(/^ +| *\r?$/, "")
2176             gsub(/ *\t */, "\t")
2177             gsub(/  +/, " ")
2178             print; fflush()
2179         }
2180     ' "$@"
2181 }
2182 
2183 # SQUeeze and stOMP, by ignoring leading spaces, trailing spaces, even runs
2184 # of multiple spaces in the middle of lines, as well as trailing carriage
2185 # returns, while also turning runs of empty lines into single empty lines,
2186 # and ignoring leading/trailing empty lines, effectively also `squeezing`
2187 # lines vertically
2188 squomp() {
2189     awk '
2190         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2191         /^\r?$/ { empty = 1; next }
2192         empty { if (n > 0) print ""; empty = 0 }
2193         {
2194             gsub(/^ +| *\r?$/, "")
2195             gsub(/ *\t */, "\t")
2196             gsub(/  +/, " ")
2197             print; fflush()
2198             n++
2199         }
2200     ' "$@"
2201 }
2202 
2203 # Show a command, then Run it
2204 sr() { printf "\e[7m%s\e[0m\n" "$*" && "$@"; }
2205 
2206 # turn runs of empty lines into single empty lines, effectively squeezing
2207 # paragraphs vertically, so to speak; runs of empty lines both at the start
2208 # and at the end are ignored
2209 stomp() {
2210     awk '
2211         /^\r?$/ { empty = 1; next }
2212         empty { if (n > 0) print ""; empty = 0 }
2213         { print; fflush(); n++ }
2214     ' "$@"
2215 }
2216 
2217 # STRike-thru (lines) with AWK
2218 strawk() {
2219     local cond="${1:-1}"
2220     [ $# -gt 0 ] && shift
2221     awk '
2222         { low = lower = tolower($0) }
2223         '"${cond}"' {
2224             gsub(/\x1b\[0m/, "\x1b[0m\x1b[9m")
2225             printf "\x1b[9m%s\x1b[0m\n", $0
2226             fflush()
2227             next
2228         }
2229         { print; fflush() }
2230     ' "$@"
2231 }
2232 
2233 # Sort Tab-Separated Values: emit the first line as is, sorting all lines after
2234 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2235 # all/any arguments/options to it
2236 stsv() {
2237     awk -v sort="sort -t'$(printf '\t')' $*" '
2238         { gsub(/\r$/, "") }
2239         NR == 1 { print; fflush() }
2240         NR > 1 { print | sort }
2241     '
2242 }
2243 
2244 # use the result of the `awk` function `substr` for each line
2245 substr() {
2246     local start="${1:-1}"
2247     local length="${2:-80}"
2248     [ $# -gt 0 ] && shift
2249     [ $# -gt 0 ] && shift
2250     awk -v start="${start}" -v len="${length}" \
2251         '{ printf "%s\n", substr($0, start, len); fflush() }' "$@"
2252 }
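
     # usage sketches for `substr`, as comments: the defaults start at the first
     # symbol and keep up to 80 symbols per line
     #     echo abcdef | substr 2 3    # emits bcd
     #     substr 1 40 notes.txt       # the filename is just an example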
2253 
2254 # turn SUDo privileges OFF right away: arguments also cause `sudo` to run with
2255 # what's given, before relinquishing existing privileges
2256 # sudoff() { [ $# -gt 0 ] && sudo "$@"; sudo -k; }
2257 
2258 # turn SUDo privileges OFF right away: arguments also cause `sudo` to run with
2259 # what's given, before relinquishing existing privileges
2260 # sudoff() {
2261 #     local code=0
2262 #     if [ $# -gt 0 ]; then
2263 #         sudo "$@"
2264 #         code=$?
2265 #     fi
2266 #     sudo -k
2267 #     return "${code}"
2268 # }
2269 
2270 # show a random command defined in `clam`, using `wat` from `clam` itself
2271 surprise() {
2272     wat "$(grep -E '^[a-z]+\(' "$(which clam)" | shuf -n 1 | sed -E 's-\(.*--')"
2273 }
2274 
2275 # Time the command given
2276 t() { time "$@"; }
2277 
2278 # show a reverse-sorted tally of all lines read, where ties are sorted
2279 # alphabetically
2280 tally() {
2281     awk -v sort="sort -t'$(printf '\t')' -rnk2 -k1d" '
2282         # reassure users by instantly showing the header
2283         BEGIN { print "value\ttally"; fflush() }
2284         { gsub(/\r$/, ""); t[$0]++ }
2285         END { for (k in t) { printf("%s\t%d\n", k, t[k]) | sort } }
2286     ' "$@"
2287 }
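
     # a worked example for `tally`, left as a comment
     #     printf "a\nb\na\n" | tally    # header, then a (tally 2), b (tally 1)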
2288 
2289 # Tab AWK: TSV-specific I/O settings for `awk`
2290 # tawk() { awk -F "\t" -v OFS="\t" "$@"; }
2291 
2292 # Tab AWK: TSV-specific I/O settings for `awk`
2293 tawk() { stdbuf -oL awk -F "\t" -v OFS="\t" "$@"; }
2294 
2295 # quick alias for my script `tbp`
2296 alias tb=tbp
2297 
2298 # Title ECHO changes the tab-title on your terminal app
2299 techo() { printf "\e]0;%s\a\n" "$*"; }
2300 
2301 # simulate the cadence of old-fashioned teletype machines, by slowing down
2302 # the output of ASCII/UTF-8 symbols from the standard-input
2303 teletype() {
2304     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | (
2305         IFS="$(printf "\n")"
2306         while read -r line; do
2307             echo "${line}" | sed -E 's-(.)-\1\n-g' |
2308                 while read -r item; do
2309                     sleep 0.01
2310                     printf "%s" "${item}"
2311                 done
2312             sleep 0.75
2313             printf "\n"
2314         done
2315     )
2316 }
2317 
2318 # run `top` without showing any of its output after quitting it
2319 tip() { tput smcup; top "$@"; tput rmcup; }
2320 
2321 # change the tab-title on your terminal app
2322 title() { printf "\e]0;%s\a\n" "$*"; }
2323 
2324 # quick alias for my script `tjp`
2325 alias tj=tjp
2326 
2327 # quick alias for my script `tlp`
2328 alias tl=tlp
2329 
2330 # show the current date in a specific format
2331 today() { date +'%Y-%m-%d %a %b %d'; }
2332 
2333 # get the first n lines, or 1 by default
2334 toline() { head -n "${1:-1}" "${2:--}"; }
2335 
2336 # lowercase all ASCII symbols
2337 tolower() { awk '{ print tolower($0); fflush() }' "$@"; }
2338 
2339 # play a tone/sine-wave sound lasting the number of seconds given, or for 1
2340 # second by default: after the optional duration, the next optional arguments
2341 # are the volume and the tone-frequency; uses my script `waveout`
2342 tone() {
2343     waveout "${1:-1}" "${2:-1} * sin(${3:-440} * 2 * pi * t)" |
2344         mpv --really-quiet -
2345 }
2346 
2347 # get the processes currently using the most cpu
2348 topcpu() {
2349     local n="${1:-10}"
2350     [ "$n" -gt 0 ] && ps aux | awk '
2351         NR == 1 { print; fflush() }
2352         NR > 1 { print | "sort -rnk3" }
2353     ' | head -n "$(("$n" + 1))"
2354 }
2355 
2356 # show all files directly in the folder given, without looking any deeper
2357 topfiles() {
2358     local arg
2359     for arg in "${@:-.}"; do
2360         if [ ! -d "${arg}" ]; then
2361             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2362             return 1
2363         fi
2364         stdbuf -oL find "${arg}" -maxdepth 1 -type f
2365     done
2366 }
2367 
2368 # show all files directly in folders given, along with their byte-counts
2369 topfilesizes() {
2370     local arg
2371     printf "file\tbytes\n"
2372     for arg in "${@:-.}"; do
2373         if [ ! -d "${arg}" ]; then
2374             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2375             return 1
2376         fi
2377         stdbuf -oL find "${arg}" -maxdepth 1 -type f -exec wc -c {} + |
2378             stdbuf -oL head -n -1 |
2379             sed -E -u 's-^ +--; s- +-\t-1; s-([^\t]*)\t(.*)-\2\t\1-'
2380     done
2381 }
2382 
2383 # show all folders directly in the folder given, without looking any deeper
2384 topfolders() {
2385     local arg
2386     for arg in "${@:-.}"; do
2387         if [ ! -d "${arg}" ]; then
2388             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2389             return 1
2390         fi
2391         stdbuf -oL find "${arg}" -maxdepth 1 -type d |
2392             awk '!/^\.$/ { print; fflush() }'
2393     done
2394 }
2395 
2396 # get the processes currently using the most memory
2397 topmemory() {
2398     local n="${1:-10}"
2399     [ "$n" -gt 0 ] && ps aux | awk '
2400         NR == 1 { print; fflush() }
2401         NR > 1 { print | "sort -rnk6" }
2402     ' | head -n "$(("$n" + 1))"
2403 }
2404 
2405 # transpose (switch) rows and columns from tables
2406 transpose() {
2407     awk '
2408         { gsub(/\r$/, "") }
2409 
2410         NR == 1 && /\t/ { FS = "\t"; $0 = $0 }
2411 
2412         {
2413             for (i = 1; i <= NF; i++) lines[i][NR] = $i
2414             if (maxitems < NF) maxitems = NF
2415         }
2416 
2417         END {
2418             for (j = 1; j <= maxitems; j++) {
2419                 for (i = 1; i <= NR; i++) {
2420                     if (i > 1) printf "\t"
2421                     printf "%s", lines[j][i]
2422                 }
2423                 printf "\n"
2424             }
2425         }
2426     ' "$@"
2427 }
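
     # a worked example for `transpose`, left as a comment: a 2x2 table whose
     # rows are a/b and c/d becomes one whose rows are a/c and b/d
     #     printf "a\tb\nc\td\n" | transpose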
2428 
2429 # ignore leading/trailing spaces, as well as trailing carriage returns
2430 trim() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2431 
2432 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2433 # decimal dots themselves, when decimals in a number are all zeros; works
2434 # on gawk and busybox awk, but not on mawk, as the latter lacks `gensub`
2435 # trimdecs() {
2436 #     awk '
2437 #         {
2438 #             $0 = gensub(/([0-9]+)\.0+/, "\\1", "g")
2439 #             $0 = gensub(/([0-9]+\.[0-9]*[1-9]+)0+/, "\\1", "g")
2440 #             print; fflush()
2441 #         }
2442 #     ' "$@"
2443 # }
2444 
2445 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2446 # decimal dots themselves, when decimals in a number are all zeros
2447 trimdecs() {
2448     awk '{ print; fflush() }' "$@" |
2449         sed -u -E 's-([0-9]+)\.0+-\1-g; s-([0-9]+\.[0-9]*[1-9]+)0+-\1-g'
2450 }
2451 
2452 # ignore trailing spaces, as well as trailing carriage returns
2453 trimend() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2454 
2455 # ignore trailing spaces, as well as trailing carriage returns
2456 trimends() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2457 
2458 # ignore leading/trailing spaces, as well as trailing carriage returns
2459 trimlines() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2460 
2461 # ignore leading/trailing spaces, as well as trailing carriage returns
2462 trimsides() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2463 
2464 # ignore trailing spaces, as well as trailing carriage returns
2465 trimtrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2466 
2467 # ignore trailing spaces, as well as trailing carriage returns
2468 trimtrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2469 
2470 # try running a command, emitting an explicit message to standard-error
2471 # if the command given fails
2472 try() {
2473     "$@" || {
2474         printf "\n\e[31m%s \e[41m\e[97m failed \e[0m\n" "$*" >&2
2475         return 255
2476     }
2477 }
2478 
2479 # Transform Strings with Python; uses my script `tbp`
2480 tsp() { tbp -s "$@"; }
2481 
2482 # run the command given, trying to turn its output into TSV (tab-separated
2483 # values); uses my script `dejson`
2484 tsvrun() { jc "$@" | dejson; }
2485 
2486 # deduplicate lines, keeping them in their original order
2487 unique() { awk '!c[$0]++ { print; fflush() }' "$@"; }
2488 
2489 # concatenate all named input sources unix-style: all trailing CRLFs become
2490 # single LFs, each non-empty input will always end in a LF, so lines from
2491 # different sources aren't accidentally joined; also leading UTF-8 BOMs on the
2492 # first line of each input are ignored, as those are useless at best
2493 unixify() {
2494     awk '
2495         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2496         { gsub(/\r$/, ""); print; fflush() }
2497     ' "$@"
2498 }
2499 
2500 # go UP n folders, or go up 1 folder by default
2501 up() {
2502     if [ "${1:-1}" -le 0 ]; then
2503         cd .
2504         return $?
2505     fi
2506 
2507     cd "$(printf "%${1:-1}s" "" | sed 's- -../-g')" || return $?
2508 }
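
     # usage sketches for `up`, as comments
     #     up      # same as `cd ..`
     #     up 3    # same as `cd ../../../`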
2509 
2510 # convert United States Dollars into CAnadian Dollars, using the latest
2511 # official exchange rates from the bank of canada; during weekends, the
2512 # latest rate may be from a few days ago; the default amount of usd to
2513 # convert is 1, when not given
2514 usd2cad() {
2515     local site='https://www.bankofcanada.ca/valet/observations/group'
2516     local csv_rates="${site}/FX_RATES_DAILY/csv"
2517     local url
2518     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
2519     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
2520         /USD/ { for (i = 1; i <= NF; i++) if($i ~ /USD/) j = i }
2521         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
2522 }
2523 
2524 # View with `less`
2525 v() { less -JMKiCRS "$@"; }
2526 
2527 # run a command, showing its success/failure right after
2528 verdict() {
2529     local code
2530     "$@"
2531     code=$?
2532 
2533     if [ "${code}" -eq 0 ]; then
2534         printf "\n\e[38;2;0;135;95m%s \e[48;2;0;135;95m\e[38;2;255;255;255m succeeded \e[0m\n" "$*" >&2
2535     else
2536         printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" >&2
2537     fi
2538     return "${code}"
2539 }
2540 
2541 # check shell scripts for common gotchas, avoiding complaints about using
2542 # the `local` keyword, which is widely supported in practice
2543 vetshell() { shellcheck -e 3043 "$@"; }
2544 
2545 # View with Header runs `less` without line numbers, with ANSI styles, no
2546 # line-wraps, and using the first n lines as a sticky-header (1 by default),
2547 # so they always show on top
2548 vh() {
2549     local n="${1:-1}"
2550     [ $# -gt 0 ] && shift
2551     less --header="$n" -JMKiCRS "$@"
2552 }
2553 
2554 # View Nice Columns; uses my scripts `realign` and `nn`
2555 vnc() { realign "$@" | nn --gray | less -JMKiCRS; }
2556 
2557 # View Nice Hexadecimals; uses my script `nh`
2558 vnh() { nh "$@" | less -JMKiCRS; }
2559 
2560 # View Nice Json / Very Nice Json; uses my script `nj`
2561 vnj() { nj "$@" | less -JMKiCRS; }
2562 
2563 # View Nice Numbers; uses my script `nn`
2564 vnn() { nn "${@:---gray}" | less -JMKiCRS; }
2565 
2566 # View Nice Table / Very Nice Table; uses my app `nt`
2567 # vnt() {
2568 #     awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" |
2569 #         nt |
2570 #         awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' |
2571 #         less -JMKiCRS
2572 # }
2573 
2574 # View Nice Table / Very Nice Table; uses my scripts `nt` and `nn`
2575 vnt() {
2576     awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" |
2577         nt | nn --gray |
2578         awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' |
2579         less -JMKiCRS
2580 }
2581 
2582 # View Text with `less`
2583 # vt() { less -JMKiCRS "$@"; }
2584 
2585 # View Very Nice Json; uses my scripts `nj` and `nn`
2586 vvnj() { nj "$@" | nn --gray | less -JMKiCRS; }
2587 
2588 # What are these (?); uses my command `nwat`
2589 # alias w=nwat
2590 
2591 # What Are These (?) shows what the names given to it are/do
2592 wat() {
2593     local a
2594 
2595     if [ $# -eq 0 ]; then
2596         printf "\e[31mwat: no names given\e[0m\n" > /dev/stderr
2597         return 1
2598     fi
2599 
2600     for a in "$@"; do
2601         # printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
2602         printf "\e[7m%-80s\e[0m\n" "$a"
2603 
2604         # resolve 1 alias level
2605         if alias "$a" 2> /dev/null > /dev/null; then
2606             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
2607         fi
2608 
2609         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
2610             # resolved aliases with args/spaces in them would otherwise fail
2611             echo "$a"
2612         elif whence -f "$a" > /dev/null 2> /dev/null; then
2613             # zsh seems to show a shell function's code only via `whence -f`
2614             whence -f "$a"
2615         elif type "$a" > /dev/null 2> /dev/null; then
2616             # dash doesn't support `declare`, and `type` in bash emits
2617             # a redundant first output line, when it's a shell function
2618             type "$a" | awk '
2619                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
2620                 { print; fflush() }
2621                 END { if (NR < 2 && skipped) print skipped }
2622             '
2623         else
2624             printf "\e[31m%s not found\e[0m\n" "$a"
2625         fi
2626     done | less -JMKiCRS
2627 }
2628 
2629 # Word-Count TSV, runs the `wc` app using all stats, emitting tab-separated
2630 # lines instead
2631 # wctsv() {
2632 #     printf "file\tbytes\tlines\tcharacters\twords\tlongest\n"
2633 #     stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \
2634 #         's-^ +--; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' |
2635 #         awk -F "\t" '
2636 #             BEGIN { fmt = "%s\t%s\t%s\t%s\t%s\t%s\n" }
2637 #             NR > 1 { printf fmt, f, b, l, c, w, mw; fflush() }
2638 #             { f = $6; b = $4; l = $1; c = $3; w = $2; mw = $5 }
2639 #             END { if (NR == 1 || f != "total") printf fmt, f, b, l, c, w, mw }
2640 #         '
2641 # }
2642 
2643 # Word-Count TSV, runs the `wc` app using all stats, emitting tab-separated
2644 # lines instead
2645 wctsv() {
2646     printf "file\tbytes\tlines\tcharacters\twords\tlongest\n"
2647     stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \
2648         's-^ *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^\r]*)$-\6\t\4\t\1\t\3\t\2\t\5-' |
2649         awk '
2650             NR > 1 { print prev; fflush() }
2651             { prev = $0 }
2652             END { if (NR == 1 || !/^total\t/) print }
2653         '
2654 }
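
     # a hypothetical usage for `wctsv`, kept as a comment; the glob is just an
     # example, and `vnt` is defined earlier in this script
     #     wctsv *.md | vnt    # browse the per-file stats as a nice table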
2655 
2656 # get weather forecasts, almost filling the terminal's current width
2657 # weather() {
2658 #     finger "${*}~$(($(tput cols) - 2))@graph.no" |
2659 #         sed -u -E '/^\[/d; s/-/@/g; s/^( *)@/\1-/; s/^ +@=/ -=/; s/=@ *$/=-/'
2660 # }
2661 
2662 # get weather forecasts, almost filling the terminal's current width
2663 weather() {
2664     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
2665         curl --show-error -s telnet://graph.no:79 |
2666         sed -u -E '/^\[/d; s/-/@/g; s/^( *)@/\1-/; s/^ +@=/ -=/; s/=@ *$/=-/'
2667 }
2668 
2669 # recursively find all files with trailing spaces/CRs
2670 wheretrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2671 
2672 # recursively find all files with trailing spaces/CRs
2673 whichtrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2674 
2675 # run `xargs`, using whole lines as extra arguments
2676 x() { xargs -d '\n' "$@"; }
2677 
2678 # run `xargs`, using zero/null bytes as the extra-arguments terminator
2679 x0() { xargs -0 "$@"; }
2680 
2681 # run `xargs`, using whole lines as extra arguments
2682 xl() { xargs -d '\n' "$@"; }
2683 
2684 # Youtube Audio Player
2685 yap() {
2686     local url
2687     # some youtube URIs end with extra playlist/tracker parameters
2688     url="$(echo "$1" | sed 's-&.*--')"
2689     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
2690 }
2691 
2692 # show a calendar for the current year, or for the year given
2693 year() {
2694     {
2695         # show the current date/time center-aligned
2696         printf "%22s\e[32m%s\e[0m  \e[34m%s\e[0m\n\n" \
2697             "" "$(date +'%a %b %d')" "$(date +%T)"
2698         # show a whole-year calendar
2699         cal -y "$@"
2700     } | less -JMKiCRS
2701 }
2702 
2703 # show the current date in the YYYY-MM-DD format
2704 ymd() { date +'%Y-%m-%d'; }
2705 
2706 # YouTube Url
2707 ytu() {
2708     local url
2709     # some youtube URIs end with extra playlist/tracker parameters
2710     url="$(echo "$1" | sed 's-&.*--')"
2711     [ $# -gt 0 ] && shift
2712     yt-dlp "$@" --get-url "${url}"
2713 }