File: clam.sh
   1 #!/bin/sh
   2 
   3 # The MIT License (MIT)
   4 #
   5 # Copyright © 2020-2025 pacman64
   6 #
   7 # Permission is hereby granted, free of charge, to any person obtaining a copy
   8 # of this software and associated documentation files (the “Software”), to deal
   9 # in the Software without restriction, including without limitation the rights
  10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11 # copies of the Software, and to permit persons to whom the Software is
  12 # furnished to do so, subject to the following conditions:
  13 #
  14 # The above copyright notice and this permission notice shall be included in
  15 # all copies or substantial portions of the Software.
  16 #
  17 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23 # SOFTWARE.
  24 
  25 
  26 # clam
  27 #
  28 # Command-Line Augmentation Module (clam): get the best out of your shell
  29 #
  30 #
  31 # This is a collection of arguably useful shell functions and shortcuts:
  32 # some of these extra commands can be real time/effort savers, ideally
  33 # letting you concentrate on getting things done.
  34 #
  35 # Some of these commands depend on my other scripts from the `pac-tools`;
  36 # others either rely on widely-preinstalled command-line apps, or on ones
  37 # available from most of the major command-line package managers.
  38 #
  39 # Among these commands, you'll notice a preference for lines whose items
  40 # are tab-separated instead of space-separated, and unix-style lines, which
  41 # always end with a line-feed, instead of a CRLF byte-pair. This convention
  42 # makes plain-text data-streams less ambiguous and generally easier to work
  43 # with, especially when passing them along pipes.
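     # For example, a 2-item line following this convention would look like
     # `Alice<TAB>42<LF>`: a tab between the items, and a single line-feed at
     # the end, rather than spaces between items and a trailing CRLF pair.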
  44 #
  45 # To use this script, you're supposed to `source` it, so its definitions
  46 # stay for your whole shell session: for that, you can run `source clam` or
  47 # `. clam` (no quotes either way), either directly or at shell startup.
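     # For example, adding a line like `. /path/to/clam` (illustrative path) to
     # your `~/.bashrc` or `~/.zshrc` makes these commands available in every
     # new interactive session.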
  48 #
  49 # This script is compatible with `bash`, `zsh`, and even `dash`, which is
  50 # debian linux's default non-interactive shell. Some of its commands even
  51 # seem to work on busybox's shell.
  52 
  53 
  54 # handle help options
  55 case "$1" in
  56     -h|--h|-help|--help)
  57         # show help message, using the info-comment from this very script
  58         awk '
  59             /^case / { exit }
  60             /^# +clam$/, /^$/ { gsub(/^# ?/, ""); print }
  61         ' "$0"
  62         exit 0
  63     ;;
  64 esac
  65 
  66 
  67 # dash doesn't support regex-matching syntax, forcing the use of case statements
  68 case "$0" in
  69     -bash|-dash|-sh|bash|dash|sh)
  70         # script is being sourced with bash or dash, which is good
  71         :
  72     ;;
  73     *)
  74         case "$ZSH_EVAL_CONTEXT" in
  75             *:file)
  76                 # script is being sourced with zsh, which is good
  77                 :
  78             ;;
  79             *)
  80                 # script is being run normally, which is a waste of time
  81 printf "\e[48;2;255;255;135m\e[30mDon't run this script, source it instead: to do that,\e[0m\n"
  82 printf "\e[48;2;255;255;135m\e[30mrun 'source clam' or '. clam' (no quotes either way).\e[0m\n"
  83                 # failing during shell-startup may deny shell access, so exit
  84                 # with a 0 error-code to declare success
  85                 exit 0
  86             ;;
  87         esac
  88     ;;
  89 esac
  90 
  91 
  92 # n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
  93 c1() { bsbs 1 "$@"; }
  94 c2() { bsbs 2 "$@"; }
  95 c3() { bsbs 3 "$@"; }
  96 c4() { bsbs 4 "$@"; }
  97 c5() { bsbs 5 "$@"; }
  98 c6() { bsbs 6 "$@"; }
  99 c7() { bsbs 7 "$@"; }
 100 c8() { bsbs 8 "$@"; }
 101 c9() { bsbs 9 "$@"; }
 102 
 103 # n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
 104 alias 1=c1
 105 alias 2=c2
 106 alias 3=c3
 107 alias 4=c4
 108 alias 5=c5
 109 alias 6=c6
 110 alias 7=c7
 111 alias 8=c8
 112 alias 9=c9
 113 
 114 # n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
 115 alias 1c=c1
 116 alias 2c=c2
 117 alias 3c=c3
 118 alias 4c=c4
 119 alias 5c=c5
 120 alias 6c=c6
 121 alias 7c=c7
 122 alias 8c=c8
 123 alias 9c=c9
 124 
 125 # Avoid/ignore lines which match any of the regexes given
 126 a() {
 127     awk '
 128         BEGIN {
 129             for (i = 1; i < ARGC; i++) {
 130                 e[i] = ARGV[i]
 131                 delete ARGV[i]
 132             }
 133         }
 134 
 135         {
 136             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
 137             print; fflush()
 138             got++
 139         }
 140 
 141         END { exit(got == 0) }
 142     ' "${@:-^\r?$}"
 143 }
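     # usage example (illustrative): keep only the lines which don't start
     # with the letter `a`
     #   printf 'alpha\nbeta\n' | a '^a'    # emits just: beta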
 144 
 145 # find names in the local `apt` database of installable packages
 146 # aptfind() {
 147 #     # despite warnings, the `apt search` command has been around for years
 148 #     # apt search "$1" 2>/dev/null | rg -A 1 "^$1" | sed -u 's/^--$//'
 149 #     apt search "$1" 2>/dev/null | rg -A 1 "^[a-z0-9-]*$1" | sed -u 's/^--$//'
 150 # }
 151 
 152 # emit each argument given as its own line of output
 153 args() { awk 'BEGIN { for (i = 1; i < ARGC; i++) print ARGV[i]; exit }' "$@"; }
 154 
 155 # turn UTF-8 into visible pseudo-ASCII, where variants of latin letters become
 156 # their basic ASCII counterparts, and where non-ASCII symbols become question
 157 # marks, one question mark for each code-point byte
 158 asciify() { iconv -f utf-8 -t ascii//translit "$@"; }
 159 
 160 # avoid/ignore lines which match any of the regexes given
 161 avoid() {
 162     awk '
 163         BEGIN {
 164             for (i = 1; i < ARGC; i++) {
 165                 e[i] = ARGV[i]
 166                 delete ARGV[i]
 167             }
 168         }
 169 
 170         {
 171             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
 172             print; fflush()
 173             got++
 174         }
 175 
 176         END { exit(got == 0) }
 177     ' "${@:-^\r?$}"
 178 }
 179 
 180 # AWK Begin
 181 # awkb() { awk "BEGIN { $1; exit }"; }
 182 
 183 # AWK Begin
 184 awkb() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 185 
 186 # emit a line with a repeating ball-like symbol in it
 187 balls() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -●-g'; }
 188 
 189 # show an ansi-styled BANNER-like line
 190 banner() { printf "\e[7m%s\e[0m\n" "$*"; }
 191 
 192 # emit a colored bar which can help visually separate different outputs
 193 bar() {
 194     [ "${1:-80}" -gt 0 ] &&
 195         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" ""
 196 }
 197 
 198 # process Blocks/paragraphs of non-empty lines with AWK
 199 # bawk() { awk -F='' -v RS='' "$@"; }
 200 
 201 # process Blocks/paragraphs of non-empty lines with AWK
 202 bawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 203 
 204 # play a repeating and annoying high-pitched beep sound a few times a second,
 205 # lasting the number of seconds given, or for 1 second by default; uses my
 206 # script `waveout`
 207 beeps() {
 208     local f='sin(2_000 * tau * t) * (t % 0.5 < 0.0625)'
 209     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 210 }
 211 
 212 # start by joining all arguments given as a tab-separated-items line of output,
 213 # followed by all lines from stdin verbatim
 214 begintsv() {
 215     awk '
 216         BEGIN {
 217             for (i = 1; i < ARGC; i++) {
 218                 if (i > 1) printf "\t"
 219                 printf "%s", ARGV[i]
 220                 delete ARGV[i]
 221             }
 222             if (ARGC > 1) printf "\n"
 223             fflush()
 224         }
 225         { print; fflush() }
 226     ' "$@"
 227 }
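     # usage example (illustrative): prepend a 2-item header line to TSV data
     #   printf '1\t2\n' | begintsv name size
     # emits the line `name<TAB>size`, followed by the line `1<TAB>2`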
 228 
 229 # play a repeating synthetic-bell-like sound lasting the number of seconds
 230 # given, or for 1 second by default; uses my script `waveout`
 231 bell() {
 232     local f='sin(880*tau*u) * exp(-10*u)'
 233     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 234 }
 235 
 236 # play a repeating sound with synthetic-bells, lasting the number of seconds
 237 # given, or for 1 second by default; uses my script `waveout`
 238 bells() {
 239     local f="sum(sin(880*tau*v)*exp(-10*v) for v in (u, (u-0.25)%1)) / 2"
 240     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 241 }
 242 
 243 # Breathe Header: add an empty line after the first one (the header), then
 244 # separate groups of 5 lines (by default) with empty lines between them
 245 bh() {
 246     local n="${1:-5}"
 247     [ $# -gt 0 ] && shift
 248     awk -v n="$n" '
 249         BEGIN { if (n == 0) n = -1 }
 250         (NR - 1) % n == 1 && NR > 1 { print "" }
 251         { print; fflush() }
 252     ' "$@"
 253 }
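     # usage example (illustrative): `seq 11 | bh` emits line 1 (the header),
     # an empty line, lines 2-6, another empty line, then lines 7-11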
 254 
 255 # recursively find all files with at least the number of bytes given; when
 256 # not given a minimum byte-count, the default is 100 binary megabytes
 257 bigfiles() {
 258     local n
 259     n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 260     [ $# -gt 0 ] && shift
 261 
 262     local arg
 263     for arg in "${@:-.}"; do
 264         if [ ! -d "${arg}" ]; then
 265             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 266             return 1
 267         fi
 268         stdbuf -oL find "${arg}" -type f -size "$n"c -o -size +"$n"c
 269     done
 270 }
 271 
 272 # Breathe Lines: separate groups of 5 lines (by default) with empty lines
 273 bl() {
 274     local n="${1:-5}"
 275     [ $# -gt 0 ] && shift
 276     awk -v n="$n" '
 277         BEGIN { if (n == 0) n = -1 }
 278         NR % n == 1 && NR != 1 { print "" }
 279         { print; fflush() }
 280     ' "$@"
 281 }
 282 
 283 # process BLocks/paragraphs of non-empty lines with AWK
 284 # blawk() { awk -F='' -v RS='' "$@"; }
 285 
 286 # process BLocks/paragraphs of non-empty lines with AWK
 287 blawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 288 
 289 # emit a line with a repeating block-like symbol in it
 290 blocks() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -█-g'; }
 291 
 292 # Book-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 293 # my script `bsbs`
 294 bman() {
 295     local w
 296     w="$(tput cols)"
 297     if [ "$w" -gt 120 ]; then
 298         w="$((w / 2 - 1))"
 299     fi
 300     MANWIDTH="$w" man "$@" | bsbs 2
 301 }
 302 
 303 # Begin-Only Awk
 304 # boa() { awk "BEGIN { $1; exit }"; }
 305 
 306 # Begin-Only Awk
 307 boa() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 308 
 309 # Begin-Only AWK
 310 # boawk() { awk "BEGIN { $1; exit }"; }
 311 
 312 # Begin-Only AWK
 313 boawk() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 314 
 315 # BOOK-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 316 # my script `bsbs`
 317 bookman() {
 318     local w
 319     w="$(tput cols)"
 320     if [ "$w" -gt 120 ]; then
 321         w="$((w / 2 - 1))"
 322     fi
 323     MANWIDTH="$w" man "$@" | bsbs 2
 324 }
 325 
 326 # split lines using the regex given, turning them into single-item lines
 327 breakdown() {
 328     local sep="${1:- }"
 329     [ $# -gt 0 ] && shift
 330     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 331 }
 332 
 333 # separate groups of 5 lines (by default) with empty lines
 334 breathe() {
 335     local n="${1:-5}"
 336     [ $# -gt 0 ] && shift
 337     awk -v n="$n" '
 338         BEGIN { if (n == 0) n = -1 }
 339         NR % n == 1 && NR != 1 { print "" }
 340         { print; fflush() }
 341     ' "$@"
 342 }
 343 
 344 # Browse Text
 345 bt() { less -JMKNiCRS "$@"; }
 346 
 347 # show a reverse-sorted tally of all lines read, where ties are sorted
 348 # alphabetically, and where trailing bullets are added to quickly make
 349 # the tally counts comparable at a glance
 350 bully() {
 351     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
 352         # reassure users by instantly showing the header
 353         BEGIN { print "value\ttally\tbullets"; fflush() }
 354 
 355         { gsub(/\r$/, ""); tally[$0]++ }
 356 
 357         END {
 358             # find the max tally, which is needed to build the bullets-string
 359             max = 0
 360             for (k in tally) {
 361                 if (max < tally[k]) max = tally[k]
 362             }
 363 
 364             # make enough bullets for all tallies: this loop makes growing the
 365             # string a task with complexity O(n * log n), instead of a naive
 366             # O(n**2), which can slow things down when tallies are high enough
 367             bullets = "•"
 368             for (n = max; n > 1; n /= 2) {
 369                 bullets = bullets bullets
 370             }
 371 
 372             # emit unsorted output lines to the sort cmd, which will emit the
 373             # final reverse-sorted tally lines
 374             for (k in tally) {
 375                 s = substr(bullets, 1, tally[k])
 376                 printf("%s\t%d\t%s\n", k, tally[k], s) | sort
 377             }
 378         }
 379     ' "$@"
 380 }
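     # usage example (illustrative):
     #   printf 'a\nb\na\n' | bully
     # emits the header line, then `a<TAB>2<TAB>••`, then `b<TAB>1<TAB>•`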
 381 
 382 # play a busy-phone-line sound lasting the number of seconds given, or for 1
 383 # second by default; uses my script `waveout`
 384 busy() {
 385     # local f='(u < 0.5) * (sin(480*tau * t) + sin(620*tau * t)) / 2'
 386     local f='min(1, exp(-90*(u-0.5))) * (sin(480*tau*t) + sin(620*tau*t)) / 2'
 387     # local f='(sin(350*tau*t) + sin(450*tau*t)) / 2 * min(1, exp(-90*(u-0.5)))'
 388     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 389 }
 390 
 391 # keep all BUT the FIRST (skip) n lines, or skip just the 1st line by default
 392 butfirst() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
 393 
 394 # keep all BUT the LAST n lines, or skip just the last line by default
 395 butlast() { head -n -"${1:-1}" "${2:--}"; }
 396 
 397 # load bytes from the filenames given
 398 bytes() { cat "$@"; }
 399 
 400 # quick alias for `cat`
 401 c() { cat "$@"; }
 402 
 403 # CAlculator with Nice numbers runs my script `ca` and colors results with
 404 # my script `nn`, alternating styles to make long numbers easier to read
 405 can() { ca "$@" | nn --gray; }
 406 
 407 # conCATenate Lines guarantees no lines are ever accidentally joined
 408 # across inputs, always emitting a line-feed at the end of every line
 409 # catl() { awk '{ print; fflush() }' "$@"; }
 410 
 411 # conCATenate Lines ignores leading byte-order marks on first lines, trailing
 412 # carriage-returns, and guarantees no lines are ever accidentally joined
 413 # across inputs, always emitting a line-feed at the end of every line
 414 catl() {
 415     awk '
 416         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 417         { gsub(/\r$/, ""); print; fflush() }
 418     ' "$@"
 419 }
 420 
 421 # Csv AWK: CSV-specific input settings for `awk`
 422 # cawk() { awk --csv "$@"; }
 423 
 424 # Csv AWK: CSV-specific input settings for `awk`
 425 cawk() { stdbuf -oL awk --csv "$@"; }
 426 
 427 # Compile C Stripped
 428 ccs() { cc -Wall -O2 -s -fanalyzer "$@"; }
 429 
 430 # center-align lines of text, using the current screen width
 431 center() {
 432     awk -v width="$(tput cols)" '
 433         {
 434             gsub(/\r$/, "")
 435             lines[NR] = $0
 436             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
 437             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
 438             l = length
 439             if (maxlen < l) maxlen = l
 440         }
 441 
 442         END {
 443             n = (width - maxlen) / 2
 444             if (n % 1) n = n - (n % 1)
 445             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
 446             for (i = 1; i <= NR; i++) printf fmt, "", lines[i]
 447         }
 448     ' "$@"
 449 }
 450 
 451 # Colored Go Test on the folder given; uses my command `gbmawk`
 452 cgt() { go test "${1:-.}" 2>&1 | gbmawk '/^ok/' '/^[-]* ?FAIL/' '/^\?/'; }
 453 
 454 # ignore final line-feed from text, if it's the very last byte; also ignore
 455 # all trailing carriage-returns
 456 choplf() {
 457     awk '
 458         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 459         NR > 1 { print ""; fflush() }
 460         { gsub(/\r$/, ""); printf "%s", $0; fflush() }
 461     ' "$@"
 462 }
 463 
 464 # Color Json using the `jq` app, allowing an optional filepath as the data
 465 # source, and even an optional transformation formula
 466 cj() { jq -C "${2:-.}" "${1:--}"; }
 467 
 468 # clean the screen, after running the command given
 469 # clean() { tput smcup; "$@"; tput rmcup; }
 470 
 471 # show a live digital clock
 472 clock() { watch -n 1 echo 'Press Ctrl + C to quit this clock'; }
 473 
 474 # Colored Live/Line-buffered RipGrep ensures results show up immediately,
 475 # also emitting colors when piped
 476 clrg() { rg --color=always --line-buffered "$@"; }
 477 
 478 # CLear Screen, like the old dos command of the same name
 479 cls() { clear; }
 480 
 481 # COunt COndition: count how many times the AWK expression given is true
 482 coco() {
 483     local cond="${1:-1}"
 484     [ $# -gt 0 ] && shift
 485     awk "
 486         { low = lower = tolower(\$0) }
 487         ${cond} { count++ }
 488         END { print count + 0 }
 489     " "$@"
 490 }
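     # usage example (illustrative): count the even numbers from 1 to 10
     #   seq 10 | coco '$0 % 2 == 0'    # emits: 5
     # the lowercased current line is available to the condition as `low`/`lower`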
 491 
 492 # Colored RipGrep ensures app `rg` emits colors when piped
 493 crg() { rg --color=always --line-buffered "$@"; }
 494 
 495 # emit a line with a repeating cross-like symbol in it
 496 crosses() {
 497     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -×-g'
 498 }
 499 
 500 # split lines using the string given, turning them into single-item lines
 501 crumble() {
 502     local sep="${1:- }"
 503     [ $# -gt 0 ] && shift
 504     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 505 }
 506 
 507 # turn Comma-Separated-Values tables into Tab-Separated-Values tables
 508 csv2tsv() { xsv fmt -t '\t' "$@"; }
 509 
 510 # Change Units turns common US units into international ones; uses my
 511 # scripts `bu` (Better Units) and `nn` (Nice Numbers)
 512 cu() {
 513     bu "$@" | awk '
 514         NF == 5 || (NF == 4 && $NF == "s") { print $(NF-1), $NF }
 515         NF == 4 && $NF != "s" { print $NF }
 516     ' | nn --gray
 517 }
 518 
 519 # CURL Silent spares you the progress bar, but still tells you about errors
 520 curls() { curl --show-error -s "$@"; }
 521 
 522 # Count With AWK: count the times the AWK expression/condition given is true
 523 cwawk() {
 524     local cond="${1:-1}"
 525     [ $# -gt 0 ] && shift
 526     awk "
 527         { low = lower = tolower(\$0) }
 528         ${cond} { count++ }
 529         END { print count + 0 }
 530     " "$@"
 531 }
 532 
 533 # listen to streaming DANCE music
 534 dance() {
 535     printf "streaming \e[7mDance Wave Retro\e[0m\n"
 536     # mpv --quiet https://retro.dancewave.online/retrodance.mp3
 537     mpv --really-quiet https://retro.dancewave.online/retrodance.mp3
 538 }
 539 
 540 # emit a line with a repeating dash-like symbol in it
 541 dashes() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -—-g'; }
 542 
 543 # DEcode BASE64-encoded data, or even base64-encoded data-URIs, by ignoring
 544 # the leading data-URI declaration, if present
 545 debase64() { sed -E 's-^data:.{0,50};base64,--' "${1:--}" | base64 -d; }
 546 
 547 # DECAPitate (lines) emits the first line as is, piping all lines after that
 548 # to the command given, passing all/any arguments/options to it
 549 # decap() {
 550 #     awk -v cmd="$*" 'NR == 1 { print; fflush() } NR > 1 { print | cmd }'
 551 # }
 552 
 553 # turn Comma-Separated-Values tables into tab-separated-values tables
 554 # decsv() { xsv fmt -t '\t' "$@"; }
 555 
 556 # DEDUPlicate prevents lines from appearing more than once
 557 dedup() { awk '!c[$0]++ { print; fflush() }' "$@"; }
 558 
 559 # dictionary-DEFine the word given, using an online service
 560 def() {
 561     local arg
 562     local gap=0
 563     for arg in "$@"; do
 564         [ "${gap}" -gt 0 ] && printf "\n"
 565         gap=1
 566         printf "\x1b[7m%-80s\x1b[0m\n" "${arg}"
 567         curl -s "dict://dict.org/d:${arg}" | awk '
 568             { gsub(/\r$/, "") }
 569             /^151 / {
 570                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 571                 next
 572             }
 573             /^[1-9][0-9]{2} / {
 574                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 575                 next
 576             }
 577             { print; fflush() }
 578         '
 579     done | less -JMKiCRS
 580 }
 581 
 582 # dictionary-define the word given, using an online service
 583 define() {
 584     local arg
 585     local gap=0
 586     for arg in "$@"; do
 587         [ "${gap}" -gt 0 ] && printf "\n"
 588         gap=1
 589         printf "\x1b[7m%-80s\x1b[0m\n" "${arg}"
 590         curl -s "dict://dict.org/d:${arg}" | awk '
 591             { gsub(/\r$/, "") }
 592             /^151 / {
 593                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 594                 next
 595             }
 596             /^[1-9][0-9]{2} / {
 597                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 598                 next
 599             }
 600             { print; fflush() }
 601         '
 602     done | less -JMKiCRS
 603 }
 604 
 605 # DEcompress GZip-encoded data
 606 # degz() { zcat "$@"; }
 607 
 608 # turn JSON Lines into a proper json array
 609 dejsonl() { jq -s -M "${@:-.}"; }
 610 
 611 # delay lines from the standard-input, waiting the number of seconds given
 612 # for each line, or waiting 1 second by default
 613 # delay() {
 614 #     local seconds="${1:-1}"
 615 #     (
 616 #         IFS="$(printf "\n")"
 617 #         while read -r line; do
 618 #             sleep "${seconds}"
 619 #             printf "%s\n" "${line}"
 620 #         done
 621 #     )
 622 # }
 623 
 624 # expand each tab into up to the number of spaces given, or 4 by default
 625 detab() { expand -t "${1:-4}"; }
 626 
 627 # ignore trailing spaces, as well as trailing carriage returns
 628 detrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
 629 
 630 # turn UTF-16 data into UTF-8
 631 deutf16() { iconv -f utf16 -t utf8 "$@"; }
 632 
 633 # DIVide 2 numbers 3 ways, including the complement
 634 div() {
 635     awk -v a="${1:-1}" -v b="${2:-1}" '
 636         BEGIN {
 637             gsub(/_/, "", a); a += 0 # ensure numeric comparison below
 638             gsub(/_/, "", b); b += 0
 639             if (a > b) { c = a; a = b; b = c }
 640             c = 1 - a / b
 641             if (0 <= c && c <= 1) printf "%f\n%f\n%f\n", a / b, b / a, c
 642             else printf "%f\n%f\n", a / b, b / a
 643             exit
 644         }'
 645 }
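     # usage example (illustrative): `div 3 4` emits 0.750000, 1.333333, and
     # 0.250000 (the complement), each on its own line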
 646 
 647 # get/fetch data from the filename or URI given; named `dog` because dogs can
 648 # `fetch` things for you
 649 # dog() {
 650 #     if [ $# -gt 1 ]; then
 651 #         printf "\e[31mdogs only have 1 mouth to fetch with\e[0m\n" >&2
 652 #         return 1
 653 #     fi
 654 #
 655 #     if [ -e "$1" ]; then
 656 #         cat "$1"
 657 #         return $?
 658 #     fi
 659 #
 660 #     case "${1:--}" in
 661 #         -) cat -;;
 662 #         file://*|https://*|http://*) curl --show-error -s "$1";;
 663 #         ftp://*|ftps://*|sftp://*) curl --show-error -s "$1";;
 664 #         dict://*|telnet://*) curl --show-error -s "$1";;
 665 #         data:*) echo "$1" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 666 #         *) curl --show-error -s "https://$1";;
 667 #     esac 2> /dev/null || {
 668 #         printf "\e[31mcan't fetch %s\e[0m\n" "${1:--}" >&2
 669 #         return 1
 670 #     }
 671 # }
 672 
 673 # emit a line with a repeating dot-like symbol in it
 674 dots() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -·-g'; }
 675 
 676 # ignore/remove all matched regexes given on all stdin lines
 677 drop() {
 678     awk '
 679         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 680         {
 681             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 682             print; fflush()
 683         }
 684     ' "${@:-\r$}"
 685 }
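     # usage example (illustrative): remove all `o` and `l` letters
     #   echo 'hello world' | drop o l    # emits: he wrd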
 686 
 687 # show the current Date and Time
 688 dt() {
 689     printf "\e[32m%s\e[0m  \e[34m%s\e[0m\n" "$(date +'%a %b %d')" "$(date +%T)"
 690 }
 691 
 692 # show the current Date, Time, and a Calendar with the 3 `current` months
 693 dtc() {
 694     {
 695         # show the current date/time center-aligned
 696         printf "%22s\e[32m%s\e[0m  \e[34m%s\e[0m\n\n" \
 697             "" "$(date +'%a %b %d')" "$(date +%T)"
 698         # debian linux has a different `cal` app which highlights the day
 699         if [ -e "/usr/bin/ncal" ]; then
 700             # fix debian/ncal's weird way to highlight the current day
 701             ncal -C -3 | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
 702         else
 703             cal -3
 704         fi
 705     } | less -JMKiCRS
 706 }
 707 
 708 # quick alias for `echo`
 709 e() { echo "$@"; }
 710 
 711 # Evaluate Awk expression
 712 ea() {
 713     local expr="${1:-0}"
 714     [ $# -gt 0 ] && shift
 715     awk "BEGIN { print ${expr}; exit }" "$@"
 716 }
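     # usage example (illustrative): `ea '2 * 3 + 1'` emits 7, and
     # `ea 'sqrt(2)'` emits 1.41421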
 717 
 718 # Extended-mode Grep, enabling its full regex syntax
 719 eg() { grep -E --line-buffered "$@"; }
 720 
 721 # Extended Grep, Recursive Interactive and Plain
 722 # egrip() { ugrep -r -Q --color=never -E "$@"; }
 723 
 724 # show all empty files in a folder, digging recursively
 725 emptyfiles() {
 726     local arg
 727     for arg in "${@:-.}"; do
 728         if [ ! -d "${arg}" ]; then
 729             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 730             return 1
 731         fi
 732         stdbuf -oL find "${arg}" -type f -size 0c
 733     done
 734 }
 735 
 736 # Evaluate Nodejs expression
 737 # en() {
 738 #     local expr="${1:-null}"
 739 #     expr="$(echo "${expr}" | sed 's-\\-\\\\-g; s-`-\`-g')"
 740 #     node -e "console.log(${expr})" | sed 's-\x1b\[[^A-Za-z]+[A-Za-z]--g'
 741 # }
 742 
 743 # Evaluate Python expression
 744 ep() { python -c "print(${1:-None})"; }
 745 
 746 # Extended Plain Interactive Grep
 747 epig() { ugrep --color=never -Q -E "$@"; }
 748 
 749 # Extended Plain Recursive Interactive Grep
 750 eprig() { ugrep -r --color=never -Q -E "$@"; }
 751 
 752 # Evaluate Ruby expression
 753 er() { ruby -e "puts ${1:-nil}"; }
 754 
 755 # ignore/remove all matched regexes given on all stdin lines
 756 erase() {
 757     awk '
 758         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 759         {
 760             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 761             print; fflush()
 762         }
 763     ' "${@:-\r$}"
 764 }
 765 
 766 # Editor Read-Only
 767 ero() { micro -readonly true "$@"; }
 768 
 769 # Extended-mode Sed, enabling its full regex syntax
 770 es() { sed -E -u "$@"; }
 771 
 772 # convert EURos into CAnadian Dollars, using the latest official exchange
 773 # rates from the bank of canada; during weekends, the latest rate may be
 774 # from a few days ago; the default amount of euros to convert is 1, when
 775 # not given
 776 eur2cad() {
 777     local site='https://www.bankofcanada.ca/valet/observations/group'
 778     local csv_rates="${site}/FX_RATES_DAILY/csv"
 779     local url
 780     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
 781     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
 782         /EUR/ { for (i = 1; i <= NF; i++) if($i ~ /EUR/) j = i }
 783         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
 784 }
 785 
 786 # EValuate AWK expression
 787 evawk() {
 788     local expr="${1:-0}"
 789     [ $# -gt 0 ] && shift
 790     awk "BEGIN { print ${expr}; exit }" "$@"
 791 }
 792 
 793 # convert fahrenheit into celsius
 794 fahrenheit() {
 795     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' |
 796         awk '/./ { printf "%.2f\n", ($0 - 32) * 5.0/9.0 }'
 797 }
 798 
 799 # Flushed AWK
 800 fawk() { stdbuf -oL awk "$@"; }
 801 
 802 # fetch/web-request all URIs given, using protocol HTTPS when none is given
 803 fetch() {
 804     local a
 805     for a in "$@"; do
 806         case "$a" in
 807             file://*|https://*|http://*) curl --show-error -s "$a";;
 808             ftp://*|ftps://*|sftp://*) curl --show-error -s "$a";;
 809             dict://*|telnet://*) curl --show-error -s "$a";;
 810             data:*) echo "$a" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 811             *) curl --show-error -s "https://$a";;
 812         esac
 813     done
 814 }
 815 
 816 # run the Fuzzy Finder (fzf) in multi-choice mode, with custom keybindings
 817 ff() { fzf -m --bind ctrl-a:select-all,ctrl-space:toggle "$@"; }
 818 
 819 # show all files in a folder, digging recursively
 820 files() {
 821     local arg
 822     for arg in "${@:-.}"; do
 823         if [ ! -d "${arg}" ]; then
 824             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 825             return 1
 826         fi
 827         stdbuf -oL find "${arg}" -type f
 828     done
 829 }
 830 
 831 # recursively find all files with fewer bytes than the number given
 832 filesunder() {
 833     local n
 834     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 835     [ $# -gt 0 ] && shift
 836 
 837     local arg
 838     for arg in "${@:-.}"; do
 839         if [ ! -d "${arg}" ]; then
 840             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 841             return 1
 842         fi
 843         stdbuf -oL find "${arg}" -type f -size -"$n"c
 844     done
 845 }
 846 
 847 # get the first n lines, or 1 by default
 848 first() { head -n "${1:-1}" "${2:--}"; }
 849 
 850 # limit data up to the first n bytes
 851 firstbytes() { head -c "$1" "${2:--}"; }
 852 
 853 # get the first n lines, or 1 by default
 854 firstlines() { head -n "${1:-1}" "${2:--}"; }
 855 
 856 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
 857 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
 858 # and ensuring each input's last line ends with a line-feed
 859 fixlines() {
 860     awk '
 861         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 862         { gsub(/\r$/, ""); print; fflush() }
 863     ' "$@"
 864 }
 865 
 866 # FLushed AWK
 867 # flawk() { stdbuf -oL awk "$@"; }
 868 
 869 # First Line AWK emits the first line as is, then handles the rest of the
 870 # input with AWK, injecting the first argument given into the script, and
 871 # passing all later arguments to `awk` as given
 872 flawk() {
 873     local code="${1:-1}"
 874     [ $# -gt 0 ] && shift
 875     stdbuf -oL awk "NR == 1 { print; fflush(); next } ${code}" "$@"
 876 }
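     # usage example (illustrative): double every value, but keep the header
     #   printf 'count\n3\n1\n' | flawk '{ print 2 * $0 }'    # count, 6, 2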
 877 
 878 # Faint LEAK emits/tees input both to stdout and stderr, coloring gray what
 879 # it emits to stderr using an ANSI style; this cmd is useful to `debug` pipes
 880 # involving several steps
 881 fleak() {
 882     awk '
 883         {
 884             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
 885             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0 > "/dev/stderr"
 886             print; fflush()
 887         }
 888     ' "$@"
 889 }
 890 
 891 # try to run the command given using line-buffering for its (standard) output
 892 flushlines() { stdbuf -oL "$@"; }
 893 
 894 # show all folders in a folder, digging recursively
 895 folders() {
 896     local arg
 897     for arg in "${@:-.}"; do
 898         if [ ! -d "${arg}" ]; then
 899             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 900             return 1
 901         fi
 902         stdbuf -oL find "${arg}" -type d | awk '!/^\.$/ { print; fflush() }'
 903     done
 904 }
 905 
 906 # start from the line number given, skipping all previous ones
 907 fromline() { tail -n +"${1:-1}" "${2:--}"; }
 908 
 909 # convert FeeT into meters
 910 ft() {
 911     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 912         awk '/./ { printf "%.2f\n", 0.3048 * $0; fflush() }'
 913 }
 914 
 915 # convert FeeT² (squared) into meters²
 916 ft2() {
 917     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 918         awk '/./ { printf "%.2f\n", 0.09290304 * $0 }'
 919 }
 920 
 921 # Get/fetch data from the filenames/URIs given; uses my script `get`
 922 # g() { get "$@"; }
 923 
 924 # run `grep` in extended-regex mode, enabling its full regex syntax
 925 # g() { grep -E --line-buffered "$@"; }
 926 
 927 # convert GALlons into liters
 928 gal() {
 929     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 930         awk '/./ { printf "%.2f\n", 3.785411784 * $0; fflush() }'
 931 }
 932 
 933 # convert binary GigaBytes into bytes
 934 gb() {
 935     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 936         awk '/./ { printf "%.4f\n", 1073741824 * $0; fflush() }' |
 937         sed 's-\.00*$--'
 938 }
 939 
 940 # glue/stick together various lines, only emitting a line-feed at the end; an
 941 # optional argument is the output-item-separator, which is empty by default
 942 glue() {
 943     local sep="${1:-}"
 944     [ $# -gt 0 ] && shift
 945     awk -v sep="${sep}" '
 946         NR > 1 { printf "%s", sep }
 947         { gsub(/\r/, ""); printf "%s", $0; fflush() }
 948         END { if (NR > 0) print ""; fflush() }
 949     ' "$@"
 950 }
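     # usage example (illustrative): join lines using a comma and a space
     #   printf 'a\nb\nc\n' | glue ', '    # emits: a, b, c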
 951 
 952 # GO Build Stripped: a common use-case for the go compiler
 953 gobs() { go build -ldflags "-s -w" -trimpath "$@"; }
 954 
 955 # GO DEPendencieS: show all dependencies in a go project
 956 godeps() { go list -f '{{ join .Deps "\n" }}' "$@"; }
 957 
 958 # GO IMPortS: show all imports in a go project
 959 goimps() { go list -f '{{ join .Imports "\n" }}' "$@"; }
 960 
 961 # go to the folder picked using an interactive TUI; uses my script `bf`
 962 goto() {
 963     local where
 964     where="$(bf "${1:-.}")"
 965     if [ $? -ne 0 ]; then
 966         return 0
 967     fi
 968 
 969     where="$(realpath "${where}")"
 970     if [ ! -d "${where}" ]; then
 971         where="$(dirname "${where}")"
 972     fi
 973     cd "${where}" || return
 974 }
 975 
 976 # GRayed-out lines with AWK
 977 grawk() {
 978     local cond="${1:-1}"
 979     [ $# -gt 0 ] && shift
 980     awk "${cond}"' {
 981             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m")
 982             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush()
 983             next
 984         }
 985         { print; fflush() }
 986     ' "$@"
 987 }
 988 
 989 # Style lines using a GRAY-colored BACKground
 990 grayback() {
 991     awk '
 992         {
 993             gsub(/\x1b\[0m/, "\x1b[0m\x1b[48;2;218;218;218m")
 994             printf "\x1b[48;2;218;218;218m%s\x1b[0m\n", $0; fflush()
 995         }
 996     ' "$@"
 997 }
 998 
 999 # Grep, Recursive Interactive and Plain
1000 # grip() { ugrep -r -Q --color=never -E "$@"; }
1001 
1002 # Global extended regex SUBstitute, using the AWK function of the same name:
1003 # arguments are used as regex/replacement pairs, in that order
1004 gsub() {
1005     awk '
1006         BEGIN {
1007             for (i = 1; i < ARGC; i++) {
1008                 args[++n] = ARGV[i]
1009                 delete ARGV[i]
1010             }
1011         }
1012         {
1013             for (i = 1; i <= n; i += 2) gsub(args[i], args[i + 1])
1014             print; fflush()
1015         }
1016     ' "$@"
1017 }
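     # usage example (illustrative): regex/replacement pairs apply in order
     #   echo 'hello world' | gsub o 0 l L    # emits: heLL0 w0rLd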
1018 
1019 # Highlight (lines) with AWK
1020 hawk() {
1021     local cond="${1:-1}"
1022     [ $# -gt 0 ] && shift
1023     awk '
1024         { low = lower = tolower($0) }
1025         '"${cond}"' {
1026             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1027             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1028             next
1029         }
1030         { print; fflush() }
1031     ' "$@"
1032 }
1033 
1034 # play a heartbeat-like sound lasting the number of seconds given, or for 1
1035 # second by default; uses my script `waveout`
1036 heartbeat() {
1037     local a='sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1])'
1038     local b='((12, u), (8, (u-0.25)%1))'
1039     local f="sum($a for v in $b) / 2"
1040     # local f='sum(sin(10*tau*exp(-20*v))*exp(-2*v) for v in (u, (u-0.25)%1))/2'
1041     # local f='sum(sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1]) for v in ((12, u), (8, (u-0.25)%1)))/2'
1042     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1043 }
1044 
1045 # Highlighted-style ECHO
1046 hecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1047 
1048 # show each byte as a pair of HEXadecimal (base-16) symbols
1049 hexify() {
1050     cat "$@" | od -x -A n |
1051         awk '{ gsub(/ +/, ""); printf "%s", $0; fflush() } END { printf "\n" }'
1052 }
1053 
1054 # HIghlighted-style ECHO
1055 hiecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1056 
1057 # highlight lines
1058 highlight() {
1059     awk '
1060         {
1061             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1062             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1063         }
1064     ' "$@"
1065 }
1066 
1067 # HIghlight LEAK emits/tees input both to stdout and stderr, highlighting what
1068 # it emits to stderr using an ANSI style; this cmd is useful to `debug` pipes
1069 # involving several steps
1070 hileak() {
1071     awk '
1072         {
1073             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
1074             printf "\x1b[7m%s\x1b[0m\n", $0 > "/dev/stderr"
1075             print; fflush()
1076         }
1077     ' "$@"
1078 }
1079 
1080 # highlight lines
1081 hilite() {
1082     awk '
1083         {
1084             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1085             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1086         }
1087     ' "$@"
1088 }
1089 
1090 # Help Me Remember my custom shell commands
1091 hmr() {
1092     local cmd="bat"
1093     # debian linux uses a different name for the `bat` app
1094     if [ -e "/usr/bin/batcat" ]; then
1095         cmd="batcat"
1096     fi
1097 
1098     "$cmd" \
1099         --style=plain,header,numbers --theme='Monokai Extended Light' \
1100         --wrap=never --color=always "$(which clam)" |
1101             sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' | less -JMKiCRS
1102 }
1103 
1104 # convert seconds into a colon-separated Hours-Minutes-Seconds triple
1105 hms() {
1106     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | awk '/./ {
1107         x = $0
1108         h = (x - x % 3600) / 3600
1109         m = (x % 3600) / 60
1110         s = x % 60
1111         printf "%02d:%02d:%05.2f\n", h, m, s; fflush()
1112     }'
1113 }
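     # usage example (illustrative): `hms 3661.5` emits 01:01:01.50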
1114 
1115 # find all hyperlinks inside HREF attributes in the input text
1116 href() {
1117     awk '
1118         BEGIN { e = "href=\"[^\"]+\"" }
1119         {
1120             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1121                 print substr(s, RSTART + 6, RLENGTH - 7); fflush()
1122             }
1123         }
1124     ' "$@"
1125 }
1126 
1127 # Index all lines starting from 0, using a tab right after each line number
1128 # i() {
1129 #     local start="${1:-0}"
1130 #     [ $# -gt 0 ] && shift
1131 #     nl -b a -w 1 -v "${start}" "$@"
1132 # }
1133 
1134 # Index all lines starting from 0, using a tab right after each line number
1135 i() { stdbuf -oL nl -b a -w 1 -v 0 "$@"; }
1136 
1137 # avoid/ignore lines which case-insensitively match any of the regexes given
1138 iavoid() {
1139     awk '
1140         BEGIN {
1141             if (IGNORECASE == "") {
1142                 m = "this variant of AWK lacks case-insensitive regex-matching"
1143                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1144                 exit 125
1145             }
1146             IGNORECASE = 1
1147 
1148             for (i = 1; i < ARGC; i++) {
1149                 e[i] = ARGV[i]
1150                 delete ARGV[i]
1151             }
1152         }
1153 
1154         {
1155             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
1156             print; fflush(); got++
1157         }
1158 
1159         END { exit(got == 0) }
1160     ' "${@:-^\r?$}"
1161 }
1162 
1163 # case-Insensitively DEDUPlicate prevents lines from appearing more than once
1164 idedup() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1165 
1166 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1167 idrop() {
1168     awk '
1169         BEGIN {
1170             if (IGNORECASE == "") {
1171                 m = "this variant of AWK lacks case-insensitive regex-matching"
1172                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1173                 exit 125
1174             }
1175             IGNORECASE = 1
1176 
1177             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1178         }
1179 
1180         {
1181             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1182             print; fflush()
1183         }
1184     ' "${@:-\r$}"
1185 }
1186 
1187 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1188 ierase() {
1189     awk '
1190         BEGIN {
1191             if (IGNORECASE == "") {
1192                 m = "this variant of AWK lacks case-insensitive regex-matching"
1193                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1194                 exit 125
1195             }
1196             IGNORECASE = 1
1197 
1198             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1199         }
1200 
1201         {
1202             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1203             print; fflush()
1204         }
1205     ' "${@:-\r$}"
1206 }
1207 
1208 # ignore command in a pipe: this allows quick re-editing of pipes, while
1209 # still leaving signs of previously-used steps, as a memo
1210 ignore() { cat; }
1211 
1212 # only keep lines which case-insensitively match any of the regexes given
1213 imatch() {
1214     awk '
1215         BEGIN {
1216             if (IGNORECASE == "") {
1217                 m = "this variant of AWK lacks case-insensitive regex-matching"
1218                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1219                 exit 125
1220             }
1221             IGNORECASE = 1
1222 
1223             for (i = 1; i < ARGC; i++) {
1224                 e[i] = ARGV[i]
1225                 delete ARGV[i]
1226             }
1227         }
1228 
1229         {
1230             for (i = 1; i < ARGC; i++) {
1231                 if ($0 ~ e[i]) {
1232                     print; fflush()
1233                     got++
1234                     next
1235                 }
1236             }
1237         }
1238 
1239         END { exit(got == 0) }
1240     ' "${@:-[^\r]}"
1241 }
1242 
1243 # start each non-empty line with n extra spaces
1244 indent() {
1245     awk '
1246         BEGIN {
1247             n = ARGV[1] + 0
1248             delete ARGV[1]
1249             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
1250         }
1251 
1252         /^\r?$/ { print ""; fflush(); next }
1253         { gsub(/\r$/, ""); printf(fmt, "", $0); fflush() }
1254     ' "$@"
1255 }
1256 
1257 # listen to INTENSE streaming radio
1258 intense() {
1259     printf "streaming \e[7mIntense Radio\e[0m\n"
1260     mpv --quiet https://secure.live-streams.nl/flac.flac
1261 }
1262 
1263 # emit each word-like item from each input line on its own line; when a file
1264 # has tabs on its first line, items are split using tabs alone, which allows
1265 # items to have spaces in them
1266 items() {
1267     awk '
1268         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
1269         { gsub(/\r$/, ""); for (i = 1; i <= NF; i++) print $i; fflush() }
1270     ' "$@"
1271 }
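     # usage example (illustrative): tabs on the first line make items with
     # spaces in them stay whole
     #   printf 'a b\tc d\n' | items    # emits 2 lines: `a b`, then `c d`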
1272 
1273 # case-insensitively deduplicate lines, keeping them in their original order:
1274 # the checking/matching is case-insensitive, but each first match is output
1275 # exactly as is
1276 iunique() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1277 
1278 # shrink/compact Json data, allowing an optional filepath
1279 # j0() { python -m json.tool --compact "${1:--}"; }
1280 
1281 # shrink/compact Json using the `jq` app, allowing an optional filepath, and
1282 # even an optional transformation formula after that
1283 # j0() { jq -c -M "${2:-.}" "${1:--}"; }
1284 
1285 # show Json data on multiple lines, using 2 spaces for each indentation level,
1286 # allowing an optional filepath
1287 # j2() { python -m json.tool --indent 2 "${1:--}"; }
1288 
1289 # show Json data on multiple lines, using 2 spaces for each indentation level,
1290 # allowing an optional filepath, and even an optional transformation formula
1291 # after that
1292 # j2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1293 
1294 # listen to streaming JAZZ music
1295 jazz() {
1296     printf "streaming \e[7mSmooth Jazz Instrumental\e[0m\n"
1297     # mpv https://stream.zeno.fm/00rt0rdm7k8uv
1298     mpv --quiet https://stream.zeno.fm/00rt0rdm7k8uv
1299 }
1300 
1301 # show a `dad` JOKE from the web, sometimes even a very funny one
1302 # joke() {
1303 #     curl -s https://icanhazdadjoke.com | fold -s | sed -E 's- *\r?$--'
1304 #     # plain-text output from previous cmd doesn't end with a line-feed
1305 #     printf "\n"
1306 # }
1307 
1308 # show a `dad` JOKE from the web, sometimes even a very funny one
1309 joke() {
1310     curl --show-error -s https://icanhazdadjoke.com | fold -s |
1311         awk '{ gsub(/ *\r?$/, ""); print }'
1312 }
1313 
1314 # shrink/compact JSON data, allowing an optional filepath
1315 # json0() { python -m json.tool --compact "${1:--}"; }
1316 
1317 # shrink/compact JSON using the `jq` app, allowing an optional filepath, and
1318 # even an optional transformation formula after that
1319 json0() { jq -c -M "${2:-.}" "${1:--}"; }
1320 
1321 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1322 # allowing an optional filepath
1323 # json2() { python -m json.tool --indent 2 "${1:--}"; }
1324 
1325 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1326 # allowing an optional filepath, and even an optional transformation formula
1327 # after that
1328 json2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1329 
1330 # turn JSON Lines into a proper JSON array
1331 jsonl2json() { jq -s -M "${@:-.}"; }
1332 
1333 # emit the given number of random/junk bytes, or 1024 junk bytes by default
1334 junk() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" /dev/urandom; }
1335 
1336 # only keep the file-extension part from lines ending with file-extensions
1337 # justext() {
1338 #     awk '
1339 #         !/^\./ && /\./ { gsub(/^.+\.+/, ""); printf ".%s\n", $0; fflush() }
1340 #     ' "$@"
1341 # }
1342 
1343 # only keep the file-extension part from lines ending with file-extensions
1344 justext() {
1345     awk '
1346         !/^\./ && /\./ {
1347             if (match($0, /((\.[A-Za-z0-9]+)+) *\r?$/)) {
1348                 print substr($0, RSTART, RLENGTH); fflush()
1349             }
1350         }
1351     ' "$@"
1352 }
1353 
1354 # only keep lines ending with a file-extension of any popular picture format
1355 justpictures() {
1356     awk '
1357         /.\.(bmp|gif|heic|ico|jfif|jpe?g|png|svg|tiff?|webp) *\r?$/ {
1358             gsub(/ *\r?$/, ""); print; fflush()
1359         }
1360     ' "$@"
1361 }
1362 
1363 # only keep lines ending with a file-extension of any popular sound format
1364 justsounds() {
1365     awk '
1366         /.\.(aac|aif[cf]?|au|flac|m4a|m4b|mp[23]|ogg|snd|wav|wma) *\r?$/ {
1367             gsub(/ *\r?$/, ""); print; fflush()
1368         }
1369     ' "$@"
1370 }
1371 
1372 # only keep lines ending with a file-extension of any popular video format
1373 justvideos() {
1374     awk '
1375         /.\.(avi|mkv|mov|mp4|mpe?g|ogv|webm|wmv) *\r?$/ {
1376             gsub(/ *\r?$/, ""); print; fflush()
1377         }
1378     ' "$@"
1379 }
1380 
1381 # convert binary KiloBytes into bytes
1382 kb() {
1383     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1384         awk '/./ { printf "%.2f\n", 1024 * $0; fflush() }' |
1385         sed 's-\.00*$--'
1386 }
1387 
1388 # run `less`, showing line numbers, among other settings
1389 l() { less -JMKNiCRS "$@"; }
1390 
1391 # Like A Book groups lines as 2 side-by-side pages, the same way books
1392 # do it; uses my script `book`
1393 lab() { book "$(($(tput lines) - 1))" "$@" | less -JMKiCRS; }
1394 
1395 # find the LAN (local-area network) IP address for this device
1396 lanip() { hostname -I; }
1397 
1398 # Line xARGS: `xargs` using line separators, which handles filepaths
1399 # with spaces, as long as the standard input has 1 path per line
1400 # largs() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
1401 
1402 # get the last n lines, or 1 by default
1403 # last() { tail -n "${1:-1}" "${2:--}"; }
1404 
1405 # get up to the last given number of bytes
1406 lastbytes() { tail -c "${1:-1}" "${2:--}"; }
1407 
1408 # get the last n lines, or 1 by default
1409 lastlines() { tail -n "${1:-1}" "${2:--}"; }
1410 
1411 # turn UTF-8 into its latin-like subset, where variants of latin letters stay
1412 # as given, and where all other symbols become question marks, one question
1413 # mark for each code-point byte
1414 latinize() {
1415     iconv -f utf-8 -t latin-1//translit "$@" | iconv -f latin-1 -t utf-8
1416 }
1417 
1418 # Lowercased (lines) AWK
1419 lawk() {
1420     local code="${1:-1}"
1421     [ $# -gt 0 ] && shift
1422     awk "
1423         {
1424             line = orig = original = \$0
1425             low = lower = tolower(\$0)
1426             \$0 = lower
1427         }
1428         ${code}
1429         { fflush() }
1430     " "$@";
1431 }
1432 
1433 # convert pounds (LB) into kilograms
1434 lb() {
1435     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1436         awk '/./ { printf "%.2f\n", 0.45359237 * $0; fflush() }'
1437 }
1438 
1439 # turn the first n space-separated fields on each line into tab-separated
1440 # ones; this behavior is useful to make the output of many cmd-line tools
1441 # into TSV, since filenames are usually the last fields, and these may
1442 # contain spaces which aren't meant to be split into different fields
1443 leadtabs() {
1444     local n="$1"
1445     local cmd
1446     cmd="$([ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "")"
1447     cmd="s-^ *--; s- *\\r?\$--; $(echo "${cmd}" | sed 's/ /s- +-\\t-1;/g')"
1448     sed -u -E "${cmd}"
1449 }
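     # usage example (illustrative): tab-separate the first 2 fields, keeping
     # the space inside the trailing filename
     #   echo 'rw- 123 my file.txt' | leadtabs 2    # rw-<TAB>123<TAB>my file.txt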
1450 
1451 # run `less`, showing line numbers, among other settings
1452 least() { less -JMKNiCRS "$@"; }
1453 
1454 # limit stops at the first n bytes, or 1024 bytes by default
1455 limit() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" "${2:--}"; }
1456 
1457 # Less with Header runs `less` with line numbers, ANSI styles, no line-wraps,
1458 # and using the first n lines as a sticky-header (1 by default), so they
1459 # always show on top
1460 lh() {
1461     local n="${1:-1}"
1462     [ $# -gt 0 ] && shift
1463     less --header="$n" -JMKNiCRS "$@"
1464 }
1465 
1466 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
1467 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
1468 # and ensuring each input's last line ends with a line-feed
1469 lines() {
1470     awk '
1471         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1472         { gsub(/\r$/, ""); print; fflush() }
1473     ' "$@"
1474 }
1475 
1476 # regroup adjacent lines into n-item tab-separated lines
1477 lineup() {
1478     local n="${1:-0}"
1479     [ $# -gt 0 ] && shift
1480 
1481     if [ "$n" -le 0 ]; then
1482         awk '
1483             NR > 1 { printf "\t" }
1484             { printf "%s", $0; fflush() }
1485             END { if (NR > 0) print "" }
1486         ' "$@"
1487         return $?
1488     fi
1489 
1490     awk -v n="$n" '
1491         NR % n != 1 && n > 1 { printf "\t" }
1492         { printf "%s", $0; fflush() }
1493         NR % n == 0 { print ""; fflush() }
1494         END { if (NR % n != 0) print "" }
1495     ' "$@"
1496 }
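     # usage example (illustrative): regroup 6 lines into 2 lines of 3 items
     #   seq 6 | lineup 3    # emits: 1<TAB>2<TAB>3, then 4<TAB>5<TAB>6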
1497 
1498 # find all hyperLINKS (https:// and http://) in the input text
1499 links() {
1500     awk '
1501         BEGIN { e = "https?://[A-Za-z0-9+_.:%-]+(/[A-Za-z0-9+_.%/,#?&=-]*)*" }
1502         {
1503             # match all links in the current line
1504             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1505                 print substr(s, RSTART, RLENGTH); fflush()
1506             }
1507         }
1508     ' "$@"
1509 }
1510 
1511 # List files, using the `Long` option
1512 # ll() { ls -l "$@"; }
1513 
1514 # LOAD data from the filename or URI given; uses my script `get`
1515 load() { get "$@"; }
1516 
1517 # LOwercase line, check (awk) COndition: on each success, the original line
1518 # is output with its original letter-casing, as its lower-cased version is
1519 # only a convenience meant for the condition
1520 loco() {
1521     local cond="${1:-1}"
1522     [ $# -gt 0 ] && shift
1523     awk "
1524         {
1525             line = orig = original = \$0
1526             low = lower = tolower(\$0)
1527             \$0 = lower
1528         }
1529         ${cond} { print line; fflush() }
1530     " "$@"
1531 }
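     # usage example (illustrative): match case-insensitively, emit the
     # original line with its letter-casing intact
     #   printf 'Alpha\nbeta\n' | loco '/alpha/'    # emits just: Alpha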
1532 
1533 # LOcal SERver webserves files in a folder as localhost, using the port
1534 # number given, or port 8080 by default
1535 loser() {
1536     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
1537     python3 -m http.server "${1:-8080}" -d "${2:-.}"
1538 }
1539 
1540 # LOWercase all ASCII symbols
1541 low() { awk '{ print tolower($0); fflush() }' "$@"; }
1542 
1543 # LOWERcase all ASCII symbols
1544 lower() { awk '{ print tolower($0); fflush() }' "$@"; }
1545 
1546 # Live/Line-buffered RipGrep ensures results show/pipe up immediately
1547 lrg() { rg --line-buffered "$@"; }
1548 
1549 # Listen To Youtube
1550 lty() {
1551     local url
1552     # some youtube URIs end with extra playlist/tracker parameters
1553     url="$(echo "$1" | sed 's-&.*--')"
1554     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
1555 }
1556 
1557 # Match lines with any of the regexes given
1558 m() {
1559     awk '
1560         BEGIN {
1561             for (i = 1; i < ARGC; i++) {
1562                 e[i] = ARGV[i]
1563                 delete ARGV[i]
1564             }
1565         }
1566 
1567         {
1568             for (i = 1; i < ARGC; i++) {
1569                 if ($0 ~ e[i]) {
1570                     print; fflush()
1571                     got++
1572                     next
1573                 }
1574             }
1575         }
1576 
1577         END { exit(got == 0) }
1578     ' "${@:-[^\r]}"
1579 }
1580 
1581 # only keep lines which match any of the regexes given
1582 match() {
1583     awk '
1584         BEGIN {
1585             for (i = 1; i < ARGC; i++) {
1586                 e[i] = ARGV[i]
1587                 delete ARGV[i]
1588             }
1589         }
1590 
1591         {
1592             for (i = 1; i < ARGC; i++) {
1593                 if ($0 ~ e[i]) {
1594                     print; fflush()
1595                     got++
1596                     next
1597                 }
1598             }
1599         }
1600 
1601         END { exit(got == 0) }
1602     ' "${@:-[^\r]}"
1603 }
1604 
1605 # MAX Width truncates lines to at most the number of symbols/bytes given, or up
1606 # to 80 by default; output lines end with an ANSI reset-code, in case input
1607 # lines use ANSI styles
1608 maxw() {
1609     local maxwidth="${1:-80}"
1610     [ $# -gt 0 ] && shift
1611     awk -v maxw="${maxwidth}" '
1612         {
1613             gsub(/\r$/, "")
1614             printf("%s\x1b[0m\n", substr($0, 1, maxw)); fflush()
1615         }
1616     ' "$@"
1617 }
1618 
1619 # convert binary MegaBytes into bytes
1620 mb() {
1621     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1622         awk '/./ { printf "%.2f\n", 1048576 * $0; fflush() }' |
1623         sed 's-\.00*$--'
1624 }
1625 
1626 # Multi-Core MAKE runs `make` using all cores
1627 mcmake() { make -j "$(nproc)" "$@"; }
1628 
1629 # Multi-Core MaKe runs `make` using all cores
1630 mcmk() { make -j "$(nproc)" "$@"; }
1631 
1632 # merge stderr into stdout, without any ugly keyboard-dancing
1633 # merrge() { "$@" 2>&1; }
1634 
1635 # convert MIles into kilometers
1636 mi() {
1637     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1638         awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }'
1639 }
1640 
1641 # convert MIles² (squared) into kilometers²
1642 mi2() {
1643     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1644         awk '/./ { printf "%.2f\n", 2.5899881103360 * $0 }'
1645 }
1646 
1647 # Make In Folder
1648 mif() {
1649     local code
1650     pushd "${1:-.}" > /dev/null || return
1651     [ $# -gt 0 ] && shift
1652     make "$@"
1653     code=$?
1654     popd > /dev/null || return "${code}"
1655     return "${code}"
1656 }
1657 
1658 # Media INFO
1659 # minfo() { mediainfo "$@" | less -JMKiCRS; }
1660 
1661 # Media INFO
1662 # minfo() { ffprobe "$@" |& less -JMKiCRS; }
1663 
1664 # run `make`
1665 mk() { make "$@"; }
1666 
1667 # convert Miles Per Hour into kilometers per hour
1668 mph() {
1669     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1670         awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }'
1671 }
1672 
1673 # Number all lines, using a tab right after each line number
1674 # n() {
1675 #     local start="${1:-1}"
1676 #     [ $# -gt 0 ] && shift
1677 #     nl -b a -w 1 -v "${start}" "$@"
1678 # }
1679 
1680 # Number all lines, using a tab right after each line number
1681 n() { stdbuf -oL nl -b a -w 1 -v 1 "$@"; }
1682 
1683 # Not AND sorts its 2 inputs, then finds lines not in common
1684 nand() {
1685     # comm -3 <(sort "$1") <(sort "$2")
1686     # dash doesn't support the process-sub syntax
1687     (sort "$1" | (sort "$2" | (comm -3 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
1688 }
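
# a quick usage sketch, with hypothetical filenames: the result is the same
# as bash's `comm -3 <(sort old.txt) <(sort new.txt)`, minus the process-subs
#   nand old.txt new.txt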
1689 
1690 # Nice Byte Count, using my scripts `nn` and `cext`
1691 nbc() { wc -c "$@" | nn --gray | cext; }
1692 
1693 # listen to streaming NEW WAVE music
1694 newwave() {
1695     printf "streaming \e[7mNew Wave radio\e[0m\n"
1696     mpv --quiet https://puma.streemlion.com:2910/stream
1697 }
1698 
1699 # NIce(r) COlumns makes the output of commands whose first line is a header
1700 # easier to read; uses my script `nn`
1701 nico() {
1702     awk '
1703         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1704         { printf "%5d  %s\n", NR - 1, $0; fflush() }
1705     ' "$@" | nn --gray | less -JMKiCRS
1706 }
1707 
1708 # emit nothing to output and/or discard everything from input
1709 nil() {
1710     if [ $# -gt 0 ]; then
1711         "$@" > /dev/null
1712     else
1713         cat < /dev/null
1714     fi
1715 }
1716 
1717 # pipe-run my scripts `nj` (Nice Json) and `nn` (Nice Numbers)
1718 njnn() { nj "$@" | nn --gray; }
1719 
1720 # NArrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1721 naman() {
1722     local w
1723     w="$(tput cols)"
1724     if [ "$w" -gt 120 ]; then
1725         w="$((w / 2 - 1))"
1726     fi
1727     MANWIDTH="$w" man "$@"
1728 }
1729 
1730 # Narrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1731 nman() {
1732     local w
1733     w="$(tput cols)"
1734     if [ "$w" -gt 120 ]; then
1735         w="$((w / 2 - 1))"
1736     fi
1737     MANWIDTH="$w" man "$@"
1738 }
1739 
1740 # convert Nautical MIles into kilometers
1741 nmi() {
1742     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1743         awk '/./ { printf "%.2f\n", 1.852 * $0; fflush() }'
1744 }
1745 
1746 # NO (standard) ERRor ignores stderr, without any ugly keyboard-dancing
1747 # noerr() { "$@" 2> /dev/null; }
1748 
1749 # play a white-noise sound lasting the number of seconds given, or for 1
1750 # second by default; uses my script `waveout`
1751 noise() { waveout "${1:-1}" "${2:-0.05} * random()" | mpv --really-quiet -; }
1752 
1753 # show the current date and time
1754 now() { date +'%Y-%m-%d %H:%M:%S'; }
1755 
1756 # Nice Processes shows/lists all current processes; uses my script `nn`
1757 np() {
1758     local res
1759     local code
1760     # res="$(ps "${@:-auxf}")"
1761     res="$(ps "${@:-aux}")"
1762     code=$?
1763     if [ "${code}" -ne 0 ]; then
1764         return "${code}"
1765     fi
1766 
1767     echo "${res}" | awk '
1768         BEGIN {
1769             d = strftime("%a %b %d")
1770             t = strftime("%H:%M:%S")
1771             # printf "%s  %s\n\n", d, t
1772             # printf "\x1b[32m%s\x1b[0m  \x1b[34m%s\x1b[0m\n\n", d, t
1773             # printf "%30s\x1b[32m%s\x1b[0m  \x1b[34m%s\x1b[0m\n\n", "", d, t
1774             # printf "%30s%s  %s\n\n", "", d, t
1775             printf "\x1b[7m%30s%s  %s%30s\x1b[0m\n\n", "", d, t, ""
1776         }
1777 
1778         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1779 
1780         $1 == "root" {
1781             # gsub(/^/, "\x1b[36m")
1782             # gsub(/\x1b\[0m/, "\x1b[0m\x1b[36m")
1783             gsub(/^/, "\x1b[34m")
1784             gsub(/ +/, "&\x1b[0m\x1b[34m")
1785             gsub(/$/, "\x1b[0m")
1786         }
1787 
1788         {
1789             gsub(/ \? /, "\x1b[38;2;135;135;175m&\x1b[0m")
1790             gsub(/0[:\.]00*/, "\x1b[38;2;135;135;175m&\x1b[0m")
1791             printf "%3d  %s\n", NR - 1, $0
1792         }
1793     ' | nn --gray | less -JMKiCRS
1794 }
1795 
1796 # Nice Size, using my scripts `nn` and `cext`
1797 ns() { wc -c "$@" | nn --gray | cext; }
1798 
1799 # Nice Transform Json, using my scripts `tj`, and `nj`
1800 ntj() { tj "$@" | nj; }
1801 
1802 # Nice TimeStamp
1803 nts() {
1804     ts '%Y-%m-%d %H:%M:%S' |
1805         sed -u 's-^-\x1b[48;2;218;218;218m\x1b[38;2;0;95;153m-; s- -\x1b[0m\t-2'
1806 }
1807 
1808 # emit nothing to output and/or discard everything from input
1809 null() {
1810     if [ $# -gt 0 ]; then
1811         "$@" > /dev/null
1812     else
1813         cat < /dev/null
1814     fi
1815 }
1816 
1817 # NULl-terminate LINES ends each stdin line with a null byte, instead of a
1818 # line-feed byte
1819 nullines() {
1820     awk -v ORS='\000' '
1821         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1822         { gsub(/\r$/, ""); print; fflush() }
1823     ' "$@"
1824 }
1825 
1826 # (Nice) What Are These (?) shows what the names given to it are/do, coloring
1827 # the syntax of shell functions
1828 nwat() {
1829     local a
1830     local gap=0
1831 
1832     if [ $# -eq 0 ]; then
1833         printf "\e[38;2;204;0;0mnwat: no names given\e[0m\n" > /dev/stderr
1834         return 1
1835     fi
1836 
1837     local cmd="bat"
1838     # debian linux uses a different name for the `bat` app
1839     if [ -e "/usr/bin/batcat" ]; then
1840         cmd="batcat"
1841     fi
1842 
1843     for a in "$@"; do
1844         [ "${gap}" -gt 0 ] && printf "\n"
1845         gap=1
1846         # printf "\e[7m%-80s\e[0m\n" "$a"
1847         printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
1848 
1849         # resolve 1 alias level
1850         if alias "$a" 2> /dev/null > /dev/null; then
1851             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
1852         fi
1853 
1854         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
1855             # resolved aliases with args/spaces in them would otherwise fail
1856             echo "$a"
1857         elif whence -f "$a" > /dev/null 2> /dev/null; then
1858             # zsh seems to show a shell function's code only via `whence -f`
1859             whence -f "$a"
1860         elif type "$a" > /dev/null 2> /dev/null; then
1861             # dash doesn't support `declare`, and `type` in bash emits
1862             # a redundant first output line, when it's a shell function
1863             type "$a" | awk '
1864                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
1865                 { print; fflush() }
1866                 END { if (NR < 2 && skipped) print skipped }
1867             ' | "$cmd" -l sh --style=plain --theme='Monokai Extended Light' \
1868                 --wrap=never --color=always |
1869                     sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g'
1870         else
1871             printf "\e[38;2;204;0;0m%s not found\e[0m\n" "$a"
1872         fi
1873     done | less -JMKiCRS
1874 }
1875 
1876 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1877 # alternating styles to make long numbers easier to read
1878 # nwc() { wc "$@" | nn --gray; }
1879 
1880 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1881 # alternating styles to make long numbers easier to read
1882 # nwc() { wc "$@" | nn --gray | awk '{ printf "%5d %s\n", NR, $0; fflush() }'; }
1883 
1884 # Nice Word-Count runs `wc` and colors results, using my scripts `nn` and
1885 # `cext`, alternating styles to make long numbers easier to read
1886 nwc() {
1887     wc "$@" | sort -rn | nn --gray | cext |
1888         awk '{ printf "%5d %s\n", NR - 1, $0; fflush() }'
1889 }
1890 
1891 # Nice Weather Forecast
1892 nwf() {
1893     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
1894     curl --show-error -s telnet://graph.no:79 |
1895     sed -E \
1896         -e 's/ *\r?$//' \
1897         -e '/^\[/d' \
1898         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
1899         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
1900         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
1901         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
1902         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
1903         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
1904         -e 's/\*/○/g' |
1905     awk 1 |
1906     less -JMKiCRS
1907 }
1908 
1909 # Nice Zoom Json, using my scripts `zj`, and `nj`
1910 nzj() { zj "$@" | nj; }
1911 
1912 # make text Plain, by ignoring ANSI terminal styling
1913 p() {
1914     awk '
1915         {
1916             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
1917             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
1918             print; fflush()
1919         }
1920     ' "$@"
1921 }
1922 
1923 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1924 # pawk() { awk -F='' -v RS='' "$@"; }
1925 
1926 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1927 pawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
1928 
1929 # Plain `fd`
1930 pfd() { fd --color=never "$@"; }
1931 
1932 # pick lines, using all the 1-based line-numbers given
1933 picklines() {
1934     awk '
1935         BEGIN { m = ARGC - 1; if (ARGC == 1) exit 0 }
1936         BEGIN { for (i = 1; i <= m; i++) { p[i] = ARGV[i]; delete ARGV[i] } }
1937         { l[++n] = $0 }
1938         END {
1939             for (i = 1; i <= m; i++) {
1940                 j = p[i]
1941                 if (j < 0) j += NR + 1
1942                 if (0 < j && j <= NR) print l[j]
1943             }
1944         }
1945     ' "$@"
1946 }
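
# a quick usage sketch: line-numbers come from the arguments, lines come from
# stdin, and negative line-numbers count backward from the last line
#   cat notes.txt | picklines 1 -1    # first/last lines of a hypothetical file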
1947 
1948 # Plain Interactive Grep
1949 pig() { ugrep --color=never -Q -E "$@"; }
1950 
1951 # make text plain, by ignoring ANSI terminal styling
1952 plain() {
1953     awk '
1954         {
1955             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
1956             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
1957             print; fflush()
1958         }
1959     ' "$@"
1960 }
1961 
1962 # end all lines with an ANSI-code to reset styles
1963 plainend() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1964 
1965 # end all lines with an ANSI-code to reset styles
1966 plainends() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1967 
1968 # play audio/video media
1969 # play() { mplayer -msglevel all=-1 "${@:--}"; }
1970 
1971 # play audio/video media
1972 play() { mpv "${@:--}"; }
1973 
1974 # Pick LINE, using the 1-based line-number given
1975 pline() {
1976     local line="$1"
1977     [ $# -gt 0 ] && shift
1978     awk -v n="${line}" '
1979         BEGIN { if (n < 1) exit 0 }
1980         NR == n { print; exit 0 }
1981     ' "$@"
1982 }
1983 
1984 # Paused MPV; especially useful when trying to view pictures via `mpv`
1985 pmpv() { mpv --pause "${@:--}"; }
1986 
1987 # Print Python result
1988 pp() { python -c "print($1)"; }
1989 
1990 # PRecede (input) ECHO, prepends a first line to stdin lines
1991 precho() { echo "$@" && cat /dev/stdin; }
1992 
1993 # PREcede (input) MEMO, prepends a first highlighted line to stdin lines
1994 prememo() {
1995     awk '
1996         BEGIN {
1997             if (ARGC > 1) printf "\x1b[7m"
1998             for (i = 1; i < ARGC; i++) {
1999                 if (i > 1) printf " "
2000                 printf "%s", ARGV[i]
2001                 delete ARGV[i]
2002             }
2003             if (ARGC > 1) printf "\x1b[0m\n"
2004             fflush()
2005         }
2006         { print; fflush() }
2007     ' "$@"
2008 }
2009 
2010 # start by joining all arguments given as a tab-separated-items line of output,
2011 # followed by all lines from stdin verbatim
2012 pretsv() {
2013     awk '
2014         BEGIN {
2015             for (i = 1; i < ARGC; i++) {
2016                 if (i > 1) printf "\t"
2017                 printf "%s", ARGV[i]
2018                 delete ARGV[i]
2019             }
2020             if (ARGC > 1) printf "\n"
2021             fflush()
2022         }
2023         { print; fflush() }
2024     ' "$@"
2025 }
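
# a quick usage sketch, with a hypothetical filename and header names
#   cat scores.tsv | pretsv name points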
2026 
2027 # Plain Recursive Interactive Grep
2028 prig() { ugrep --color=never -r -Q -E "$@"; }
2029 
2030 # show/list all current processes
2031 processes() {
2032     local res
2033     res="$(ps aux)"
2034     echo "${res}" | awk '!/ps aux$/' | sed -E \
2035         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' \
2036         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1'
2037 }
2038 
2039 # Play Youtube Audio
2040 pya() {
2041     local url
2042     # some youtube URIs end with extra playlist/tracker parameters
2043     url="$(echo "$1" | sed 's-&.*--')"
2044     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
2045 }
2046 
2047 # Quiet ignores stderr, without any ugly keyboard-dancing
2048 q() { "$@" 2> /dev/null; }
2049 
2050 # Quiet MPV
2051 qmpv() { mpv --quiet "${@:--}"; }
2052 
2053 # ignore stderr, without any ugly keyboard-dancing
2054 quiet() { "$@" 2> /dev/null; }
2055 
2056 # Reset the screen, which empties it and resets the current style
2057 r() { reset; }
2058 
2059 # keep only lines between the 2 line numbers given, inclusively
2060 rangelines() {
2061     { [ "$#" -eq 2 ] || [ "$#" -eq 3 ]; } && [ "${1}" -le "${2}" ] &&
2062         { tail -n +"${1:-1}" "${3:--}" | head -n "$(("${2}" - "${1}" + 1))"; }
2063 }
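
# a quick usage sketch, with a hypothetical filename: keep only lines 10-20,
# either from the file given as the 3rd argument, or from stdin by default
#   rangelines 10 20 notes.txt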
2064 
2065 # RANdom MANual page
2066 ranman() {
2067     find "/usr/share/man/man${1:-1}" -type f | shuf -n 1 | xargs basename |
2068         sed 's-\.gz$--' | xargs man
2069 }
2070 
2071 # Run AWK expression
2072 rawk() {
2073     local expr="${1:-0}"
2074     [ $# -gt 0 ] && shift
2075     awk "BEGIN { print ${expr}; exit }" "$@"
2076 }
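
# quick usage sketches: handy as a command-line calculator
#   rawk '2 ^ 32'
#   rawk 'sqrt(2) / 2'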
2077 
2078 # play a ready-phone-line sound lasting the number of seconds given, or for 1
2079 # second by default; uses my script `waveout`
2080 ready() {
2081     local f='0.5 * sin(350*tau*t) + 0.5 * sin(450*tau*t)'
2082     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2083 }
2084 
2085 # reflow/trim lines of prose (text) to improve its legibility: it's especially
2086 # useful when the text is pasted from web-pages being viewed in reader mode
2087 reprose() {
2088     local w="${1:-80}"
2089     [ $# -gt 0 ] && shift
2090     awk '
2091         FNR == 1 && NR > 1 { print "" }
2092         { gsub(/\r$/, ""); print; fflush() }
2093     ' "$@" | fold -s -w "$w" | sed -u -E 's- *\r?$--'
2094 }
2095 
2096 # ignore ansi styles from stdin and restyle things using the style-name given;
2097 # uses my script `style`
2098 restyle() { style "$@"; }
2099 
2100 # change the tab-title on your terminal app
2101 retitle() { printf "\e]0;%s\a\n" "$*"; }
2102 
2103 # REVerse-order SIZE (byte-count)
2104 revsize() { wc -c "$@" | sort -rn; }
2105 
2106 # Run In Folder
2107 rif() {
2108     local code
2109     pushd "${1:-.}" > /dev/null || return
2110     [ $# -gt 0 ] && shift
2111     "$@"
2112     code=$?
2113     popd > /dev/null || return "${code}"
2114     return "${code}"
2115 }
2116 
2117 # play a ringtone-style sound lasting the number of seconds given, or for 1
2118 # second by default; uses my script `waveout`
2119 ringtone() {
2120     local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))'
2121     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2122 }
2123 
2124 # Read-Only Editor
2125 roe() { micro -readonly true "$@"; }
2126 
2127 # Read-Only Micro (text editor)
2128 rom() { micro -readonly true "$@"; }
2129 
2130 # run the command given, trying to turn its output into TSV (tab-separated
2131 # values); uses my script `dejson`
2132 rtab() { jc "$@" | dejson; }
2133 
2134 # Right TRIM ignores trailing spaces, as well as trailing carriage returns
2135 rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2136 
2137 # show a RULER-like width-measuring line
2138 ruler() {
2139     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \
2140         's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2141 }
2142 
2143 # run the command given, trying to turn its output into TSV (tab-separated
2144 # values); uses my script `dejson`
2145 runtab() { jc "$@" | dejson; }
2146 
2147 # run the command given, trying to turn its output into TSV (tab-separated
2148 # values); uses my script `dejson`
2149 runtsv() { jc "$@" | dejson; }
2150 
2151 # Reverse-order WC
2152 rwc() { wc "$@" | sort -rn; }
2153 
2154 # extended-mode Sed, enabling its full regex syntax
2155 # s() { sed -E -u "$@"; }
2156 
2157 # Silent CURL spares you the progress bar, but still tells you about errors
2158 scurl() { curl --show-error -s "$@"; }
2159 
2160 # show a unique-looking SEParator line; useful to run between commands
2161 # which output walls of text
2162 sep() {
2163     [ "${1:-80}" -gt 0 ] &&
2164         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g'
2165 }
2166 
2167 # webSERVE files in a folder as localhost, using the port number given, or
2168 # port 8080 by default
2169 serve() {
2170     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
2171     python3 -m http.server "${1:-8080}" -d "${2:-.}"
2172 }
2173 
2174 # SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input
2175 setdiff() {
2176     # comm -23 <(sort "$1") <(sort "$2")
2177     # dash doesn't support the process-sub syntax
2178     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2179 }
2180 
2181 # SET INtersection, sorts its 2 inputs, then finds common lines
2182 setin() {
2183     # comm -12 <(sort "$1") <(sort "$2")
2184     # dash doesn't support the process-sub syntax
2185     (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2186 }
2187 
2188 # SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input
2189 setsub() {
2190     # comm -23 <(sort "$1") <(sort "$2")
2191     # dash doesn't support the process-sub syntax
2192     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2193 }
2194 
2195 # Show Files (and folders), coloring folders and links; uses my script `nn`
2196 sf() {
2197     ls -al --file-type --color=never --time-style iso "$@" | awk '
2198         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2199         {
2200             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2201             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2202             printf "%6d  %s\n", NR - 1, $0; fflush()
2203         }
2204     ' | nn --gray | less -JMKiCRS
2205 }
2206 
2207 # Show Files (and folders) Plus, by coloring folders, links, and extensions;
2208 # uses my scripts `nn` and `cext`
2209 sfp() {
2210     ls -al --file-type --color=never --time-style iso "$@" | awk '
2211         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2212         {
2213             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2214             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2215             printf "%6d  %s\n", NR - 1, $0; fflush()
2216         }
2217     ' | nn --gray | cext | less -JMKiCRS
2218 }
2219 
2220 # Show File Sizes, using my scripts `nn` and `cext`
2221 sfs() {
2222     # turn arg-list into single-item lines
2223     printf "%s\x00" "$@" |
2224     # calculate file-sizes, and reverse-sort results
2225     xargs -0 wc -c | sort -rn |
2226     # add/realign fields to improve legibility
2227     awk '
2228         # start output with a header-like line, and add a MiB field
2229         BEGIN { printf "%6s  %10s  %8s  name\n", "n", "bytes", "MiB"; fflush() }
2230         # make table breathe with empty lines, so tall outputs are readable
2231         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2232         # emit regular output lines
2233         {
2234             printf "%6d  %10d  %8.2f  ", NR - 1, $1, $1 / 1048576
2235             # first field is likely space-padded
2236             gsub(/^ */, "")
2237             # slice line after the first field, as filepaths can have spaces
2238             $0 = substr($0, length($1) + 1)
2239             # first field is likely space-padded
2240             gsub(/^ /, "")
2241             printf "%s\n", $0; fflush()
2242         }
2243     ' |
2244     # make zeros in the MiB field stand out with a special color
2245     awk '
2246         {
2247             gsub(/ 00*\.00* /, "\x1b[38;2;135;135;175m&\x1b[0m")
2248             print; fflush()
2249         }
2250     ' |
2251     # make numbers nice, alternating styles along 3-digit groups
2252     nn --gray |
2253     # color-code file extensions
2254     cext |
2255     # make result interactively browsable
2256     less -JMKiCRS
2257 }
2258 
2259 # SHell-run AWK output
2260 # shawk() { stdbuf -oL awk "$@" | sh; }
2261 
2262 # time-run various tools given one-per-line from stdin, giving them extra
2263 # common arguments passed as explicit arguments
2264 showdown() {
2265     awk '
2266         BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } }
2267         {
2268             printf "%s", $0
2269             for (i = 1; i < ARGC; i++) printf " %s", a[i]
2270             printf "\x00"; fflush()
2271         }
2272     ' "$@" | xargs -0 hyperfine --style full
2273 }
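
# a quick usage sketch, with hypothetical commands: each stdin line is a
# command to benchmark, and the explicit argument is appended to each one
#   printf '%s\n' 'grep -rF needle' 'rg -F needle' | showdown .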
2274 
2275 # SHOW a command, then RUN it
2276 showrun() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2277 
2278 # SHell-QUOTE each line from the input(s): this is useful to make lines of
2279 # single-filepaths compatible with `xargs`, whose default splitting rules
2280 # get in the way of filepaths with spaces and other special symbols in them
2281 shquote() {
2282     awk '
2283         {
2284             s = $0
2285             gsub(/\r$/, "", s)
2286             gsub(/\\/, "\\\\", s)
2287             gsub(/"/, "\\\"", s)
2288             gsub(/\$/, "\\$", s)
2289             printf "\"%s\"\n", s; fflush()
2290         }
2291     ' "$@"
2292 }
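
# a quick usage sketch: quoted lines let `xargs` treat each filepath as a
# single argument, even when it has spaces in it
#   find . -type f -name '*.txt' | shquote | xargs wc -l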
2293 
2294 # clean the screen, after running the command given
2295 # sideshow() { tput smcup; "$@"; tput rmcup; }
2296 
2297 # skip the first n lines, or the 1st line by default
2298 skip() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2299 
2300 # skip the first n bytes
2301 skipbytes() { tail -c +$(("$1" + 1)) "${2:--}"; }
2302 
2303 # skip the last n lines, or the last line by default
2304 skiplast() { head -n -"${1:-1}" "${2:--}"; }
2305 
2306 # skip the last n bytes
2307 skiplastbytes() { head -c -"$1" "${2:--}"; }
2308 
2309 # skip the last n lines, or the last line by default
2310 skiplastlines() { head -n -"${1:-1}" "${2:--}"; }
2311 
2312 # skip the first n lines, or the 1st line by default
2313 skiplines() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2314 
2315 # SLOW/delay lines from the standard-input, waiting the number of seconds
2316 # given for each line, or waiting 1 second by default
2317 slow() {
2318     local seconds="${1:-1}"
2319     (
2320         IFS="$(printf "\n")"
2321         while read -r line; do
2322             sleep "${seconds}"
2323             printf "%s\n" "${line}"
2324         done
2325     )
2326 }
2327 
2328 # Show Latest Podcasts, using my scripts `podfeed` and `si`
2329 slp() {
2330     local title
2331     title="Latest Podcast Episodes as of $(date +'%F %T')"
2332     podfeed -title "${title}" "$@" | si
2333 }
2334 
2335 # recursively find all files with fewer bytes than the number given
2336 smallfiles() {
2337     local n
2338     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
2339     [ $# -gt 0 ] && shift
2340 
2341     local arg
2342     for arg in "${@:-.}"; do
2343         if [ ! -d "${arg}" ]; then
2344             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2345             return 1
2346         fi
2347         stdbuf -oL find "${arg}" -type f -size -"$n"c
2348     done
2349 }
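
# a quick usage sketch, with a hypothetical folder name: find files smaller
# than 10 kB under `src`; underscores in the byte-count are ignored
#   smallfiles 10_000 src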
2350 
2351 # emit the first line as is, sorting all lines after that, using the
2352 # `sort` command, passing all/any arguments/options to it
2353 sortrest() {
2354     awk -v sort="sort $*" '
2355         { gsub(/\r$/, "") }
2356         NR == 1 { print; fflush() }
2357         NR > 1 { print | sort }
2358     '
2359 }
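
# a quick usage sketch: sort a process listing by its 3rd column (cpu use),
# while keeping the header line on top
#   ps aux | sortrest -rnk3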
2360 
2361 # SORt Tab-Separated Values: emit the first line as is, sorting all lines after
2362 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2363 # all/any arguments/options to it
2364 sortsv() {
2365     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2366         { gsub(/\r$/, "") }
2367         NR == 1 { print; fflush() }
2368         NR > 1 { print | sort }
2369     '
2370 }
2371 
2372 # emit a line with the number of spaces given in it
2373 spaces() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" ""; }
2374 
2375 # ignore leading spaces, trailing spaces, even runs of multiple spaces
2376 # in the middle of lines, as well as trailing carriage returns
2377 squeeze() {
2378     awk '
2379         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2380         {
2381             gsub(/^ +| *\r?$/, "")
2382             gsub(/ *\t */, "\t")
2383             gsub(/  +/, " ")
2384             print; fflush()
2385         }
2386     ' "$@"
2387 }
2388 
2389 # SQUeeze and stOMP, by ignoring leading spaces, trailing spaces, even runs
2390 # of multiple spaces in the middle of lines, as well as trailing carriage
2391 # returns, while also turning runs of empty lines into single empty lines,
2392 # and ignoring leading/trailing empty lines, effectively also `squeezing`
2393 # lines vertically
2394 squomp() {
2395     awk '
2396         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2397         /^\r?$/ { empty = 1; next }
2398         empty { if (n > 0) print ""; empty = 0 }
2399         {
2400             gsub(/^ +| *\r?$/, "")
2401             gsub(/ *\t */, "\t")
2402             gsub(/  +/, " ")
2403             print; fflush()
2404             n++
2405         }
2406     ' "$@"
2407 }
2408 
2409 # Show a command, then Run it
2410 sr() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2411 
2412 # turn runs of empty lines into single empty lines, effectively squeezing
2413 # paragraphs vertically, so to speak; runs of empty lines both at the start
2414 # and at the end are ignored
2415 stomp() {
2416     awk '
2417         /^\r?$/ { empty = 1; next }
2418         empty { if (n > 0) print ""; empty = 0 }
2419         { print; fflush(); n++ }
2420     ' "$@"
2421 }
2422 
2423 # STRike-thru (lines) with AWK
2424 strawk() {
2425     local cond="${1:-1}"
2426     [ $# -gt 0 ] && shift
2427     awk '
2428         { low = lower = tolower($0) }
2429         '"${cond}"' {
2430             gsub(/\x1b\[0m/, "\x1b[0m\x1b[9m")
2431             printf "\x1b[9m%s\x1b[0m\n", $0; fflush()
2432             next
2433         }
2434         { print; fflush() }
2435     ' "$@"
2436 }
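
# quick usage sketches, with a hypothetical filename: the condition can use
# `low`/`lower`, which hold a lowercased copy of each line
#   strawk '/DONE/' todo.txt
#   strawk 'low ~ /done/' todo.txt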
2437 
2438 # Sort Tab-Separated Values: emit the first line as is, sorting all lines after
2439 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2440 # all/any arguments/options to it
2441 stsv() {
2442     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2443         { gsub(/\r$/, "") }
2444         NR == 1 { print; fflush() }
2445         NR > 1 { print | sort }
2446     '
2447 }
2448 
2449 # use the result of the `awk` function `substr` for each line
2450 substr() {
2451     local start="${1:-1}"
2452     local length="${2:-80}"
2453     [ $# -gt 0 ] && shift
2454     [ $# -gt 0 ] && shift
2455     awk -v start="${start}" -v len="${length}" \
2456         '{ printf "%s\n", substr($0, start, len); fflush() }' "$@"
2457 }
2458 
2459 # turn SUDo privileges OFF right away: arguments also cause `sudo` to run with
2460 # what's given, before relinquishing existing privileges
2461 # sudoff() {
2462 #     local code=0
2463 #     if [ $# -gt 0 ]; then
2464 #         sudo "$@"
2465 #         code=$?
2466 #     fi
2467 #     sudo -k
2468 #     return "${code}"
2469 # }
2470 
2471 # append a final Tab-Separated-Values line with the sums of all columns from
2472 # the input table(s) given; items from first lines aren't counted/added
2473 sumtsv() {
2474     awk -F "\t" '
2475         {
2476             print; fflush()
2477             if (width < NF) width = NF
2478         }
2479 
2480         FNR > 1 { for (i = 1; i <= NF; i++) sums[i] += $i + 0 }
2481 
2482         END {
2483             for (i = 1; i <= width; i++) {
2484                 if (i > 1) printf "\t"
2485                 printf "%s", sums[i] ""
2486             }
2487             if (width > 0) printf "\n"
2488         }
2489     ' "$@"
2490 }
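
# a quick usage sketch: the line appended below holds 3 and 350, tab-separated
#   printf 'n\tbytes\n1\t100\n2\t250\n' | sumtsv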
2491 
2492 # show a random command defined in `clam`, using `wat` from `clam` itself
2493 surprise() {
2494     wat "$(grep -E '^[a-z]+\(' "$(which clam)" | shuf -n 1 | sed -E 's-\(.*--')"
2495 }
2496 
2497 # Time the command given
2498 t() { time "$@"; }
2499 
2500 # show a reverse-sorted tally of all lines read, where ties are sorted
2501 # alphabetically
2502 tally() {
2503     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
2504         # reassure users by instantly showing the header
2505         BEGIN { print "value\ttally"; fflush() }
2506         { gsub(/\r$/, ""); t[$0]++ }
2507         END { for (k in t) { printf("%s\t%d\n", k, t[k]) | sort } }
2508     ' "$@"
2509 }
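
# a quick usage sketch: tally the login shells declared in /etc/passwd
#   cut -d: -f7 /etc/passwd | tally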
2510 
2511 # Tab AWK: TSV-specific I/O settings for `awk`
2512 # tawk() { awk -F "\t" -v OFS="\t" "$@"; }
2513 
2514 # Tab AWK: TSV-specific I/O settings for `awk`
2515 tawk() { stdbuf -oL awk -F "\t" -v OFS="\t" "$@"; }
2516 
2517 # quick alias for my script `tbp`
2518 tb() { tbp "$@"; }
2519 
2520 # Titled conCATenate Lines highlights each filename, before emitting its
2521 # lines
2522 tcatl() {
2523     awk '
2524         FNR == 1 { printf "\x1b[7m%s\x1b[0m\n", FILENAME; fflush() }
2525         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2526         { gsub(/\r$/, ""); print; fflush() }
2527     ' "$@"
2528 }
2529 
2530 # Title ECHO changes the tab-title on your terminal app
2531 techo() { printf "\e]0;%s\a\n" "$*"; }
2532 
2533 # simulate the cadence of old-fashioned teletype machines, by slowing down
2534 # the output of ASCII/UTF-8 symbols from the standard-input
2535 teletype() {
2536     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | (
2537         IFS="$(printf "\n")"
2538         while read -r line; do
2539             echo "${line}" | sed -E 's-(.)-\1\n-g' |
2540                 while read -r item; do
2541                     sleep 0.015
2542                     printf "%s" "${item}"
2543                 done
2544             sleep 0.75
2545             printf "\n"
2546         done
2547     )
2548 }
2549 
2550 # run `top` without showing any of its output after quitting it
2551 tip() { tput smcup; top "$@"; tput rmcup; }
2552 
2553 # change the tab-title on your terminal app
2554 title() { printf "\e]0;%s\a\n" "$*"; }
2555 
2556 # quick alias for my script `tjp`
2557 tj() { tjp "$@"; }
2558 
2559 # quick alias for my script `tlp`
2560 tl() { tlp "$@"; }
2561 
2562 # show the current date in a specific format
2563 today() { date +'%Y-%m-%d %a %b %d'; }
2564 
2565 # get the first n lines, or 1 by default
2566 toline() { head -n "${1:-1}" "${2:--}"; }
2567 
2568 # lowercase all ASCII symbols
2569 tolower() { awk '{ print tolower($0); fflush() }' "$@"; }
2570 
2571 # play a tone/sine-wave sound lasting the number of seconds given, or for 1
2572 # second by default: after the optional duration, the next optional arguments
2573 # are the volume and the tone-frequency; uses my script `waveout`
2574 tone() {
2575     waveout "${1:-1}" "${2:-1} * sin(${3:-440} * 2 * pi * t)" |
2576         mpv --really-quiet -
2577 }
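
# quick usage sketches: 2 seconds of concert-pitch A at half volume, then
# 1 second of the next-octave A at quarter volume
#   tone 2 0.5
#   tone 1 0.25 880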
2578 
2579 # get the processes currently using the most cpu
2580 topcpu() {
2581     local n="${1:-10}"
2582     [ "$n" -gt 0 ] && ps aux | awk '
2583         NR == 1 { print; fflush() }
2584         NR > 1 { print | "sort -rnk3" }
2585     ' | head -n "$(("$n" + 1))"
2586 }
2587 
2588 # show all files directly in the folder given, without looking any deeper
2589 topfiles() {
2590     local arg
2591     for arg in "${@:-.}"; do
2592         if [ ! -d "${arg}" ]; then
2593             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2594             return 1
2595         fi
2596         stdbuf -oL find "${arg}" -maxdepth 1 -type f
2597     done
2598 }
2599 
2600 # show all folders directly in the folder given, without looking any deeper
2601 topfolders() {
2602     local arg
2603     for arg in "${@:-.}"; do
2604         if [ ! -d "${arg}" ]; then
2605             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2606             return 1
2607         fi
2608         stdbuf -oL find "${arg}" -maxdepth 1 -type d |
2609             awk '!/^\.$/ { print; fflush() }'
2610     done
2611 }
2612 
2613 # get the processes currently using the most memory
2614 topmemory() {
2615     local n="${1:-10}"
2616     [ "$n" -gt 0 ] && ps aux | awk '
2617         NR == 1 { print; fflush() }
2618         NR > 1 { print | "sort -rnk6" }
2619     ' | head -n "$(("$n" + 1))"
2620 }
2621 
2622 # transpose (switch) rows and columns from tables
2623 transpose() {
2624     awk '
2625         { gsub(/\r$/, "") }
2626 
2627         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
2628 
2629         {
2630             for (i = 1; i <= NF; i++) lines[i][NR] = $i
2631             if (maxitems < NF) maxitems = NF
2632         }
2633 
2634         END {
2635             for (j = 1; j <= maxitems; j++) {
2636                 for (i = 1; i <= NR; i++) {
2637                     if (i > 1) printf "\t"
2638                     printf "%s", lines[j][i]
2639                 }
2640                 printf "\n"
2641             }
2642         }
2643     ' "$@"
2644 }
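
# a quick usage sketch: a 2-row, 3-column table becomes a 3-row, 2-column one
#   printf 'a\tb\tc\n1\t2\t3\n' | transpose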
2645 
2646 # ignore leading/trailing spaces, as well as trailing carriage returns
2647 trim() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2648 
2649 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2650 # decimal dots themselves, when decimals in a number are all zeros; works
2651 # on gawk and busybox awk, but not on mawk, as the latter lacks `gensub`
2652 # trimdecs() {
2653 #     awk '
2654 #         {
2655 #             $0 = gensub(/([0-9]+)\.0+/, "\\1", "g")
2656 #             $0 = gensub(/([0-9]+\.[0-9]*[1-9]+)0+/, "\\1", "g")
2657 #             print; fflush()
2658 #         }
2659 #     ' "$@"
2660 # }
2661 
2662 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2663 # decimal dots themselves, when decimals in a number are all zeros
2664 trimdecs() {
2665     awk '{ print; fflush() }' "$@" |
2666         sed -u -E 's-([0-9]+)\.0+-\1-g; s-([0-9]+\.[0-9]*[1-9]+)0+-\1-g'
2667 }
2668 
2669 # ignore trailing spaces, as well as trailing carriage returns
2670 trimend() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2671 
2672 # ignore trailing spaces, as well as trailing carriage returns
2673 trimends() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2674 
2675 # ignore leading/trailing spaces, as well as trailing carriage returns
2676 trimlines() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2677 
2678 # ignore leading/trailing spaces, as well as trailing carriage returns
2679 trimsides() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2680 
2681 # ignore trailing spaces, as well as trailing carriage returns
2682 trimtrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2683 
2684 # ignore trailing spaces, as well as trailing carriage returns
2685 trimtrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2686 
2687 # try running a command, emitting an explicit message to standard-error
2688 # if the command given fails
2689 try() {
2690     "$@" || {
2691         printf "\n\e[31m%s \e[41m\e[97m failed \e[0m\n" "$*" >&2
2692         return 255
2693     }
2694 }
2695 
2696 # Transform Strings with Python; uses my script `tbp`
2697 tsp() { tbp -s "$@"; }
2698 
2699 # run the command given, trying to turn its output into TSV (tab-separated
2700 # values); uses my script `dejson`
2701 tsvrun() { jc "$@" | dejson; }
2702 
2703 # Underline (lines) with AWK
2704 uawk() {
2705     local cond="${1:-1}"
2706     [ $# -gt 0 ] && shift
2707     awk '
2708         { low = lower = tolower($0) }
2709         '"${cond}"' {
2710             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2711             printf "\x1b[4m%s\x1b[0m\n", $0; fflush()
2712             next
2713         }
2714         { print; fflush() }
2715     ' "$@"
2716 }
2717 
2718 # Underline Every few lines: make groups of 5 lines (by default) stand out by
2719 # underlining the last line of each
2720 ue() {
2721     local n="${1:-5}"
2722     [ $# -gt 0 ] && shift
2723     awk -v n="$n" '
2724         BEGIN { if (n == 0) n = -1 }
2725         NR % n == 0 && NR != 1 {
2726             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2727             printf("\x1b[4m%s\x1b[0m\n", $0); fflush()
2728             next
2729         }
2730         { print; fflush() }
2731     ' "$@"
2732 }
2733 
2734 # deduplicate lines, keeping them in their original order
2735 unique() { awk '!c[$0]++ { print; fflush() }' "$@"; }
2736 
2737 # concatenate all named input sources unix-style: all trailing CRLFs become
2738 # single LFs, each non-empty input will always end in a LF, so lines from
2739 # different sources are accidentally joined; also leading UTF-8 BOMs on the
2740 # first line of each input are ignored, as those are useless at best
2741 unixify() {
2742     awk '
2743         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2744         { gsub(/\r$/, ""); print; fflush() }
2745     ' "$@"
2746 }
2747 
2748 # go UP n folders, or go up 1 folder by default
2749 up() {
2750     if [ "${1:-1}" -le 0 ]; then
2751         cd .
2752         return $?
2753     fi
2754 
2755     cd "$(printf "%${1:-1}s" "" | sed 's- -../-g')" || return $?
2756 }
2757 
2758 # convert United States Dollars into CAnadian Dollars, using the latest
2759 # official exchange rates from the bank of canada; during weekends, the
2760 # latest rate may be from a few days ago; the default amount of usd to
2761 # convert is 1, when not given
2762 usd2cad() {
2763     local site='https://www.bankofcanada.ca/valet/observations/group'
2764     local csv_rates="${site}/FX_RATES_DAILY/csv"
2765     local url
2766     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
2767     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
2768         /USD/ { for (i = 1; i <= NF; i++) if($i ~ /USD/) j = i }
2769         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
2770 }
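
# quick usage sketches: underscores in the amount are ignored
#   usd2cad 100
#   usd2cad 2_500.75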
2771 
2772 # View with `less`
2773 v() { less -JMKiCRS "$@"; }
2774 
2775 # run a command, showing its success/failure right after
2776 verdict() {
2777     local code
2778     "$@"
2779     code=$?
2780 
2781     if [ "${code}" -eq 0 ]; then
2782         printf "\n\e[38;2;0;135;95m%s \e[48;2;0;135;95m\e[38;2;255;255;255m succeeded \e[0m\n" "$*" >&2
2783     else
2784         printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" >&2
2785     fi
2786     return "${code}"
2787 }
2788 
2789 # run `cppcheck` with even stricter options
2790 vetc() { cppcheck --enable=portability --enable=style "$@"; }
2791 
2792 # run `cppcheck` with even stricter options
2793 vetcpp() { cppcheck --enable=portability --enable=style "$@"; }
2794 
2795 # check shell scripts for common gotchas, avoiding complaints about using
2796 # the `local` keyword, which is widely supported in practice
2797 vetshell() { shellcheck -e 3043 "$@"; }
2798 
2799 # View with Header runs `less` without line numbers, with ANSI styles, no
2800 # line-wraps, and using the first n lines as a sticky-header (1 by default),
2801 # so they always show on top
2802 vh() {
2803     local n="${1:-1}"
2804     [ $# -gt 0 ] && shift
2805     less --header="$n" -JMKiCRS "$@"
2806 }
2807 
2808 # VIEW the result of showing a command, then RUNning it, using `less`
2809 viewrun() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2810 
2811 # View Nice Columns; uses my scripts `realign` and `nn`
2812 vnc() { realign "$@" | nn --gray | less -JMKiCRS; }
2813 
2814 # View Nice Hexadecimals; uses my script `nh`
2815 vnh() { nh "$@" | less -JMKiCRS; }
2816 
2817 # View Nice Json / Very Nice Json; uses my scripts `nj` and `nn`
2818 vnj() { nj "$@" | less -JMKiCRS; }
2819 
2820 # View Very Nice Json with Nice Numbers; uses my scripts `nj` and `nn`
2821 vnjnn() { nj "$@" | nn --gray | less -JMKiCRS; }
2822 
2823 # View Nice Numbers; uses my script `nn`
2824 vnn() { nn "${@:---gray}" | less -JMKiCRS; }
2825 
2826 # View Nice Table / Very Nice Table; uses my scripts `nt` and `nn`
2827 vnt() {
2828     awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" |
2829         nt | nn --gray |
2830         awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' |
2831         less -JMKiCRS #--header=1
2832 }
2833 
2834 # View-Run using `less`: show a command, then run it
2835 vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2836 
2837 # View Text with `less`
2838 # vt() { less -JMKiCRS "$@"; }
2839 
2840 # What are these (?); uses my command `nwat`
2841 # w() { nwat "$@"; }
2842 
2843 # What Are These (?) shows what the names given to it are/do
2844 wat() {
2845     local a
2846     local gap=0
2847 
2848     if [ $# -eq 0 ]; then
2849         printf "\e[31mwat: no names given\e[0m\n" > /dev/stderr
2850         return 1
2851     fi
2852 
2853     for a in "$@"; do
2854         [ "${gap}" -gt 0 ] && printf "\n"
2855         gap=1
2856         # printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
2857         printf "\e[7m%-80s\e[0m\n" "$a"
2858 
2859         # resolve 1 alias level
2860         if alias "$a" 2> /dev/null > /dev/null; then
2861             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
2862         fi
2863 
2864         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
2865             # resolved aliases with args/spaces in them would otherwise fail
2866             echo "$a"
2867         elif whence -f "$a" > /dev/null 2> /dev/null; then
2868             # zsh seems to show a shell function's code only via `whence -f`
2869             whence -f "$a"
2870         elif type "$a" > /dev/null 2> /dev/null; then
2871             # dash doesn't support `declare`, and `type` in bash emits
2872             # a redundant first output line, when it's a shell function
2873             type "$a" | awk '
2874                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
2875                 { print; fflush() }
2876                 END { if (NR < 2 && skipped) print skipped }
2877             '
2878         else
2879             printf "\e[31m%s not found\e[0m\n" "$a"
2880         fi
2881     done | less -JMKiCRS
2882 }
2883 
2884 # Word-Count TSV, runs the `wc` app using all stats, emitting tab-separated
2885 # lines instead
2886 wctsv() {
2887     printf "file\tbytes\tlines\tcharacters\twords\tlongest\n"
2888     stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \
2889         's-^ *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^\r]*)$-\6\t\4\t\1\t\3\t\2\t\5-' |
2890         awk '
2891             NR > 1 { print prev; fflush() }
2892             { prev = $0 }
2893             END { if (NR == 1 || !/^total\t/) print }
2894         '
2895 }
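
# a quick usage sketch, with hypothetical filenames; stdin also works, and
# shows up with `-` as its name
#   wctsv readme.md notes.txt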
2896 
2897 # get weather forecasts, almost filling the terminal's current width
2898 weather() {
2899     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
2900     curl --show-error -s telnet://graph.no:79 |
2901     sed -E \
2902         -e 's/ *\r?$//' \
2903         -e '/^\[/d' \
2904         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
2905         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
2906         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
2907         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
2908         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
2909         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
2910         -e 's/\*/○/g' |
2911     awk 1 |
2912     less -JMKiCRS
2913 }
2914 
2915 # Weather Forecast
2916 wf() {
2917     printf "%s\r\n\r\n" "$*" | curl --show-error -s telnet://graph.no:79 |
2918         awk '{ print; fflush() }' | less -JMKiCRS
2919 }
2920 
2921 # recursively find all files with trailing spaces/CRs
2922 wheretrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2923 
2924 # recursively find all files with trailing spaces/CRs
2925 whichtrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2926 
2927 # run `xargs`, using whole lines as extra arguments
2928 # x() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
2929 
2930 # run `xargs`, using zero/null bytes as the extra-arguments terminator
2931 x0() { xargs -0 "$@"; }
2932 
2933 # run `xargs`, using whole lines as extra arguments
2934 # xl() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
2935 
2936 # show a calendar for the current Year, or for the year given
2937 y() {
2938     {
2939         # show the current date/time center-aligned
2940         printf "%20s\e[32m%s\e[0m  \e[34m%s\e[0m\n\n" \
2941             "" "$(date +'%a %b %d %Y')" "$(date +%T)"
2942         # debian linux has a different `cal` app which highlights the day
2943         if [ -e "/usr/bin/ncal" ]; then
2944             # fix debian/ncal's weird way to highlight the current day
2945             ncal -C -y "$@" | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
2946         else
2947             cal -y "$@"
2948         fi
2949     } | less -JMKiCRS
2950 }
2951 
2952 # Youtube Audio Player
2953 yap() {
2954     local url
2955     # some youtube URIs end with extra playlist/tracker parameters
2956     url="$(echo "$1" | sed 's-&.*--')"
2957     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
2958 }
2959 
2960 # show a calendar for the current YEAR, or for the year given
2961 year() {
2962     {
2963         # show the current date/time center-aligned
2964         printf "%20s\e[32m%s\e[0m  \e[34m%s\e[0m\n\n" \
2965             "" "$(date +'%a %b %d %Y')" "$(date +%T)"
2966         # debian linux has a different `cal` app which highlights the day
2967         if [ -e "/usr/bin/ncal" ]; then
2968             # fix debian/ncal's weird way to highlight the current day
2969             ncal -C -y "$@" | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
2970         else
2971             cal -y "$@"
2972         fi
2973     } | less -JMKiCRS
2974 }
2975 
2976 # show the current date in the YYYY-MM-DD format
2977 ymd() { date +'%Y-%m-%d'; }
2978 
2979 # YouTube Url
2980 ytu() {
2981     local url
2982     # some youtube URIs end with extra playlist/tracker parameters
2983     url="$(echo "$1" | sed 's-&.*--')"
2984     [ $# -gt 0 ] && shift
2985     yt-dlp "$@" --get-url "${url}"
2986 }
2987 
2988 # . <(
2989 #     find "$(dirname $(which clam))" -type f -print0 |
2990 #         xargs -0 -n 1 basename |
2991 #         awk '{ print "unset " $0 }'
2992 # )