File: clam.sh
   1 #!/bin/sh
   2 
   3 # The MIT License (MIT)
   4 #
   5 # Copyright © 2020-2025 pacman64
   6 #
   7 # Permission is hereby granted, free of charge, to any person obtaining a copy
   8 # of this software and associated documentation files (the “Software”), to deal
   9 # in the Software without restriction, including without limitation the rights
  10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11 # copies of the Software, and to permit persons to whom the Software is
  12 # furnished to do so, subject to the following conditions:
  13 #
  14 # The above copyright notice and this permission notice shall be included in
  15 # all copies or substantial portions of the Software.
  16 #
  17 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23 # SOFTWARE.
  24 
  25 
  26 # clam
  27 #
  28 # Command-Line Augmentation Module (clam): get the best out of your shell
  29 #
  30 #
  31 # This is a collection of arguably useful shell functions and shortcuts:
  32 # some of these extra commands can be real time/effort savers, ideally
  33 # letting you concentrate on getting things done.
  34 #
  35 # Some of these commands depend on my other scripts from `pac-tools`;
  36 # others rely either on widely-preinstalled command-line apps, or on ones
  37 # available from most of the major command-line package managers.
  38 #
  39 # Among these commands, you'll notice a preference for lines whose items
  40 # are tab-separated instead of space-separated, and unix-style lines, which
  41 # always end with a line-feed, instead of a CRLF byte-pair. This convention
  42 # makes plain-text data-streams less ambiguous and generally easier to work
  43 # with, especially when passing them along pipes.
  44 #
  45 # To use this script, you're supposed to `source` it, so its definitions
  46 # stay for your whole shell session: for that, you can run `source clam` or
  47 # `. clam` (no quotes either way), either directly or at shell startup.
  48 #
  49 # This script is compatible with `bash`, `zsh`, and even `dash`, which is
  50 # debian linux's default non-interactive shell. Some of its commands even
  51 # seem to work on busybox's shell.
  52 
  53 
  54 case "$1" in
  55     -h|--h|-help|--help)
  56         # show help message, using the info-comment from this very script
  57         awk '
  58             /^case / { exit }
  59             /^# +clam$/, /^$/ { gsub(/^# ?/, ""); print }
  60         ' "$0"
  61         exit 0
  62     ;;
  63 esac
  64 
  65 
  66 # dash doesn't support regex-matching syntax, forcing the use of case statements
  67 case "$0" in
  68     -bash|-dash|-sh|bash|dash|sh)
  69         # script is being sourced with bash or dash, which is good
  70         :
  71     ;;
  72     *)
  73         case "$ZSH_EVAL_CONTEXT" in
  74             *:file)
  75                 # script is being sourced with zsh, which is good
  76                 :
  77             ;;
  78             *)
  79                 # script is being run normally, which is a waste of time
  80 printf "\e[48;2;255;255;135m\e[30mDon't run this script, source it instead: to do that,\e[0m\n"
  81 printf "\e[48;2;255;255;135m\e[30mrun 'source clam' or '. clam' (no quotes either way).\e[0m\n"
  82                 # failing during shell-startup may deny shell access, so exit
  83                 # with a 0 error-code to declare success
  84                 exit 0
  85             ;;
  86         esac
  87     ;;
  88 esac
  89 
  90 
  91 # n-column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
  92 alias 1='bsbs 1'
  93 alias 2='bsbs 2'
  94 alias 3='bsbs 3'
  95 alias 4='bsbs 4'
  96 alias 5='bsbs 5'
  97 alias 6='bsbs 6'
  98 alias 7='bsbs 7'
  99 alias 8='bsbs 8'
 100 alias 9='bsbs 9'
 101 alias 0='bsbs 10'
 102 
 103 # alias a=avoid
 104 # alias c=cat
 105 # alias e=echo
 106 # alias f=fetch
 107 # alias g=get
 108 # alias h=naman
 109 # alias m=match
 110 # alias p=plain
 111 # alias q=quiet
 112 # alias r=reset
 113 # alias t=time
 114 # alias y=year
 115 
 116 # find name from the local `apt` database of installable packages
 117 # aptfind() {
 118 #     # despite warnings, the `apt search` command has been around for years
 119 #     # apt search "$1" 2>/dev/null | rg -A 1 "^$1" | sed -u 's/^--$//'
 120 #     apt search "$1" 2>/dev/null | rg -A 1 "^[a-z0-9-]*$1" |
 121 #         sed -u 's/^--$//' | less -JMKiCRS
 122 # }
 123 
 124 # emit each argument given as its own line of output
 125 args() { awk 'BEGIN { for (i = 1; i < ARGC; i++) print ARGV[i]; exit }' "$@"; }
 126 
 127 # turn UTF-8 into visible pseudo-ASCII, where variants of latin letters become
 128 # their basic ASCII counterparts, and where non-ASCII symbols become question
 129 # marks, one question mark for each code-point byte
 130 asciify() { iconv -f utf-8 -t ascii//translit "$@"; }
 131 
 132 # avoid/ignore lines which match any of the regexes given
 133 avoid() {
 134     awk '
 135         BEGIN {
 136             for (i = 1; i < ARGC; i++) {
 137                 e[i] = ARGV[i]
 138                 delete ARGV[i]
 139             }
 140         }
 141 
 142         {
 143             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
 144             print; fflush()
 145             got++
 146         }
 147 
 148         END { exit(got == 0) }
 149     ' "${@:-^\r?$}"
 150 }
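     # example (illustrative): `printf 'ok\nerror: x\n' | avoid '^error'`
     # should emit only `ok`; with no regexes given, empty lines are avoided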
 151 
 152 # AWK Begin
 153 # awkb() { awk "BEGIN { $1; exit }"; }
 154 
 155 # AWK Begin
 156 awkb() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 157 
 158 # emit a line with a repeating ball-like symbol in it
 159 balls() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -●-g'; }
 160 
 161 # show an ansi-styled BANNER-like line
 162 # banner() { printf "\e[7m%s\e[0m\n" "$*"; }
 163 
 164 # show an ansi-styled BANNER-like line
 165 banner() { printf "\e[7m%-$(tput cols)s\e[0m\n" "$*"; }
 166 
 167 # emit a colored bar which can help visually separate different outputs
 168 bar() {
 169     [ "${1:-80}" -gt 0 ] &&
 170         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" ""
 171 }
 172 
 173 # process Blocks/paragraphs of non-empty lines with AWK
 174 # bawk() { awk -F='' -v RS='' "$@"; }
 175 
 176 # process Blocks/paragraphs of non-empty lines with AWK
 177 bawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 178 
 179 # play a repeating and annoying high-pitched beep sound a few times a second,
 180 # lasting the number of seconds given, or for 1 second by default; uses my
 181 # script `waveout`
 182 beeps() {
 183     local f='sin(2_000 * tau * t) * (t % 0.5 < 0.0625)'
 184     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 185 }
 186 
 187 # start by joining all arguments given as a tab-separated-items line of output,
 188 # followed by all lines from stdin verbatim
 189 begintsv() {
 190     awk '
 191         BEGIN {
 192             for (i = 1; i < ARGC; i++) {
 193                 if (i > 1) printf "\t"
 194                 printf "%s", ARGV[i]
 195                 delete ARGV[i]
 196             }
 197             if (ARGC > 1) printf "\n"
 198             fflush()
 199         }
 200         { print; fflush() }
 201     ' "$@"
 202 }
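     # example (illustrative): `printf '1\t2\n' | begintsv name count` should
     # emit a `name<TAB>count` header line, followed by the data line verbatim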
 203 
 204 # play a repeating synthetic-bell-like sound lasting the number of seconds
 205 # given, or for 1 second by default; uses my script `waveout`
 206 bell() {
 207     local f='sin(880*tau*u) * exp(-10*u)'
 208     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 209 }
 210 
 211 # play a repeating sound with synthetic-bells, lasting the number of seconds
 212 # given, or for 1 second by default; uses my script `waveout`
 213 bells() {
 214     local f="sum(sin(880*tau*v)*exp(-10*v) for v in (u, (u-0.25)%1)) / 2"
 215     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 216 }
 217 
 218 # Breathe Header: add an empty line after the first one (the header), then
 219 # separate groups of 5 lines (by default) with empty lines between them
 220 bh() {
 221     local n="${1:-5}"
 222     [ $# -gt 0 ] && shift
 223     awk -v n="$n" '
 224         BEGIN { if (n == 0) n = -1 }
 225         (NR - 1) % n == 1 && NR > 1 { print "" }
 226         { print; fflush() }
 227     ' "$@"
 228 }
 229 
 230 # recursively find all files with at least the number of bytes given; when
 231 # not given a minimum byte-count, the default is 100 binary megabytes
 232 bigfiles() {
 233     local n
 234     n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 235     [ $# -gt 0 ] && shift
 236 
 237     local arg
 238     for arg in "${@:-.}"; do
 239         if [ ! -d "${arg}" ]; then
 240             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 241             return 1
 242         fi
  243         stdbuf -oL find "${arg}" -type f \( -size "$n"c -o -size +"$n"c \)
 244     done
 245 }
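     # example (illustrative): `bigfiles 10_000_000 .` should list all files
     # of at least 10 million bytes under the current folder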
 246 
 247 # Breathe Lines: separate groups of 5 lines (by default) with empty lines
 248 bl() {
 249     local n="${1:-5}"
 250     [ $# -gt 0 ] && shift
 251     awk -v n="$n" '
 252         BEGIN { if (n == 0) n = -1 }
 253         NR % n == 1 && NR != 1 { print "" }
 254         { print; fflush() }
 255     ' "$@"
 256 }
 257 
 258 # process BLocks/paragraphs of non-empty lines with AWK
 259 # blawk() { awk -F='' -v RS='' "$@"; }
 260 
 261 # process BLocks/paragraphs of non-empty lines with AWK
 262 blawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 263 
 264 # emit a line with a repeating block-like symbol in it
 265 blocks() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -█-g'; }
 266 
 267 # Book-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 268 # my script `bsbs`
 269 bman() {
 270     local w
 271     w="$(tput cols)"
 272     if [ "$w" -gt 100 ]; then
 273         w="$((w / 2 - 1))"
 274     fi
 275     MANWIDTH="$w" man "$@" | bsbs 2
 276 }
 277 
 278 # Begin-Only Awk
 279 # boa() { awk "BEGIN { $1; exit }"; }
 280 
 281 # Begin-Only Awk
 282 boa() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 283 
 284 # Begin-Only AWK
 285 # boawk() { awk "BEGIN { $1; exit }"; }
 286 
 287 # Begin-Only AWK
 288 boawk() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 289 
 290 # BOOK-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 291 # my script `bsbs`
 292 bookman() {
 293     local w
 294     w="$(tput cols)"
 295     if [ "$w" -gt 100 ]; then
 296         w="$((w / 2 - 1))"
 297     fi
 298     MANWIDTH="$w" man "$@" | bsbs 2
 299 }
 300 
 301 # split lines using the regex given, turning them into single-item lines
 302 breakdown() {
 303     local sep="${1:- }"
 304     [ $# -gt 0 ] && shift
 305     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 306 }
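     # example (illustrative): `echo 'a,b,c' | breakdown ,` should emit 3
     # lines: `a`, `b`, and `c`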
 307 
 308 # separate groups of 5 lines (by default) with empty lines
 309 breathe() {
 310     local n="${1:-5}"
 311     [ $# -gt 0 ] && shift
 312     awk -v n="$n" '
 313         BEGIN { if (n == 0) n = -1 }
 314         NR % n == 1 && NR != 1 { print "" }
 315         { print; fflush() }
 316     ' "$@"
 317 }
 318 
 319 # Browse Text
 320 bt() { less -JMKNiCRS "$@"; }
 321 
 322 # show a reverse-sorted tally of all lines read, where ties are sorted
 323 # alphabetically, and where trailing bullets are added to quickly make
 324 # the tally counts comparable at a glance
 325 bully() {
 326     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
 327         # reassure users by instantly showing the header
 328         BEGIN { print "value\ttally\tbullets"; fflush() }
 329 
 330         { gsub(/\r$/, ""); tally[$0]++ }
 331 
 332         END {
 333             # find the max tally, which is needed to build the bullets-string
 334             max = 0
 335             for (k in tally) {
 336                 if (max < tally[k]) max = tally[k]
 337             }
 338 
 339             # make enough bullets for all tallies: this loop makes growing the
 340             # string a task with complexity O(n * log n), instead of a naive
  341             # O(n**2), which can slow things down when tallies are high enough
 342             bullets = "•"
 343             for (n = max; n > 1; n /= 2) {
 344                 bullets = bullets bullets
 345             }
 346 
 347             # emit unsorted output lines to the sort cmd, which will emit the
 348             # final reverse-sorted tally lines
 349             for (k in tally) {
 350                 s = substr(bullets, 1, tally[k])
 351                 printf("%s\t%d\t%s\n", k, tally[k], s) | sort
 352             }
 353         }
 354     ' "$@"
 355 }
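     # example (illustrative): `printf 'a\nb\na\n' | bully` should emit the
     # header, then `a` with tally 2 and 2 bullets, then `b` with tally 1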
 356 
 357 # play a busy-phone-line sound lasting the number of seconds given, or for 1
 358 # second by default; uses my script `waveout`
 359 busy() {
 360     # local f='(u < 0.5) * (sin(480*tau * t) + sin(620*tau * t)) / 2'
 361     local f='min(1, exp(-90*(u-0.5))) * (sin(480*tau*t) + sin(620*tau*t)) / 2'
 362     # local f='(sin(350*tau*t) + sin(450*tau*t)) / 2 * min(1, exp(-90*(u-0.5)))'
 363     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 364 }
 365 
 366 # keep all BUT the FIRST (skip) n lines, or skip just the 1st line by default
 367 butfirst() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
 368 
 369 # keep all BUT the LAST n lines, or skip just the last line by default
 370 butlast() { head -n -"${1:-1}" "${2:--}"; }
 371 
 372 # load bytes from the filenames given
 373 bytes() { cat "$@"; }
 374 
 375 # quick alias for `cat`
 376 c() { cat "$@"; }
 377 
 378 # CAlculator with Nice numbers runs my script `ca` and colors results with
 379 # my script `nn`, alternating styles to make long numbers easier to read
 380 can() { ca "$@" | nn --gray; }
 381 
 382 # uppercase the first letter on each line, and lowercase all later letters
 383 capitalize() {
 384     awk '{ print; fflush() }' "$@" | sed -E 's-^(.*)-\L\1-; s-^(.)-\u\1-'
 385 }
 386 
 387 # conCATenate Lines guarantees no lines are ever accidentally joined
 388 # across inputs, always emitting a line-feed at the end of every line
 389 # catl() { awk '{ print; fflush() }' "$@"; }
 390 
 391 # conCATenate Lines ignores leading byte-order marks on first lines, trailing
 392 # carriage-returns, and guarantees no lines are ever accidentally joined
 393 # across inputs, always emitting a line-feed at the end of every line
 394 catl() {
 395     awk '
 396         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 397         { gsub(/\r$/, ""); print; fflush() }
 398     ' "$@"
 399 }
 400 
 401 # Csv AWK: CSV-specific input settings for `awk`
 402 # cawk() { awk --csv "$@"; }
 403 
 404 # Csv AWK: CSV-specific input settings for `awk`
 405 cawk() { stdbuf -oL awk --csv "$@"; }
 406 
 407 # Compile C Stripped
 408 ccs() { cc -Wall -O2 -s -fanalyzer "$@"; }
 409 
 410 # center-align lines of text, using the current screen width
 411 center() {
 412     awk -v width="$(tput cols)" '
 413         {
 414             gsub(/\r$/, "")
 415             lines[NR] = $0
 416             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
 417             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
 418             l = length
 419             if (maxlen < l) maxlen = l
 420         }
 421 
 422         END {
 423             n = (width - maxlen) / 2
 424             if (n % 1) n = n - (n % 1)
 425             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
 426             for (i = 1; i <= NR; i++) printf fmt, "", lines[i]
 427         }
 428     ' "$@"
 429 }
 430 
 431 # Colored Go Test on the folder given; uses my command `gbmawk`
 432 cgt() { go test "${1:-.}" 2>&1 | gbmawk '/^ok/' '/^[-]* ?FAIL/' '/^\?/'; }
 433 
  434 # ignore final line-feed from text, if it's the very last byte; also ignore
 435 # all trailing carriage-returns
 436 choplf() {
 437     awk '
 438         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 439         NR > 1 { print ""; fflush() }
 440         { gsub(/\r$/, ""); printf "%s", $0; fflush() }
 441     ' "$@"
 442 }
 443 
 444 # Color Json using the `jq` app, allowing an optional filepath as the data
 445 # source, and even an optional transformation formula
 446 cj() { jq -C "${2:-.}" "${1:--}"; }
 447 
 448 # clean the screen, after running the command given
 449 clean() {
 450     local res
 451     tput smcup
 452     "$@"
 453     res=$?
 454     tput rmcup
 455     return "${res}"
 456 }
 457 
 458 # show a live digital clock
 459 clock() { watch -n 1 echo 'Press Ctrl + C to quit this clock'; }
 460 
 461 # Colored Live/Line-buffered RipGrep ensures results show up immediately,
 462 # also emitting colors when piped
 463 clrg() { rg --color=always --line-buffered "$@"; }
 464 
 465 # CLear Screen, like the old dos command of the same name
 466 cls() { clear; }
 467 
 468 # COunt COndition: count how many times the AWK expression given is true
 469 coco() {
 470     local cond="${1:-1}"
 471     [ $# -gt 0 ] && shift
 472     awk "
 473         { low = lower = tolower(\$0) }
 474         ${cond} { count++ }
  475         END { print count + 0 }
 476     " "$@"
 477 }
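     # example (illustrative): `printf 'A\nab\n' | coco '/a/'` should emit 1,
     # while `coco 'low ~ /a/'` should emit 2 for the same input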
 478 
 479 # Colored RipGrep ensures app `rg` emits colors when piped
 480 crg() { rg --color=always --line-buffered "$@"; }
 481 
 482 # emit a line with a repeating cross-like symbol in it
 483 crosses() {
 484     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -×-g'
 485 }
 486 
 487 # split lines using the string given, turning them into single-item lines
 488 crumble() {
 489     local sep="${1:- }"
 490     [ $# -gt 0 ] && shift
 491     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 492 }
 493 
 494 # turn Comma-Separated-Values tables into Tab-Separated-Values tables
 495 csv2tsv() { xsv fmt -t '\t' "$@"; }
 496 
 497 # Change Units turns common US units into international ones; uses my
 498 # scripts `bu` (Better Units) and `nn` (Nice Numbers)
 499 cu() {
 500     bu "$@" | awk '
 501         NF == 5 || (NF == 4 && $NF == "s") { print $(NF-1), $NF }
 502         NF == 4 && $NF != "s" { print $NF }
 503     ' | nn --gray
 504 }
 505 
 506 # CURL Silent spares you the progress bar, but still tells you about errors
 507 curls() { curl --show-error -s "$@"; }
 508 
 509 # Count With AWK: count the times the AWK expression/condition given is true
 510 cwawk() {
 511     local cond="${1:-1}"
 512     [ $# -gt 0 ] && shift
 513     awk "
 514         { low = lower = tolower(\$0) }
 515         ${cond} { count++ }
  516         END { print count + 0 }
 517     " "$@"
 518 }
 519 
 520 # listen to streaming DANCE music
 521 dance() {
 522     printf "streaming \e[7mDance Wave Retro\e[0m\n"
 523     # mpv --quiet https://retro.dancewave.online/retrodance.mp3
 524     mpv --really-quiet https://retro.dancewave.online/retrodance.mp3
 525 }
 526 
 527 # emit a line with a repeating dash-like symbol in it
 528 dashes() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -—-g'; }
 529 
 530 # DEcode BASE64-encoded data, or even base64-encoded data-URIs, by ignoring
 531 # the leading data-URI declaration, if present
 532 debase64() { sed -E 's-^data:.{0,50};base64,--' "${1:--}" | base64 -d; }
 533 
 534 # DECAPitate (lines) emits the first line as is, piping all lines after that
 535 # to the command given, passing all/any arguments/options to it
 536 # decap() {
 537 #     awk -v cmd="$*" 'NR == 1 { print; fflush() } NR > 1 { print | cmd }'
 538 # }
 539 
 540 # turn Comma-Separated-Values tables into tab-separated-values tables
 541 # decsv() { xsv fmt -t '\t' "$@"; }
 542 
 543 # DEDUPlicate prevents lines from appearing more than once
 544 dedup() { awk '!c[$0]++ { print; fflush() }' "$@"; }
 545 
 546 # dictionary-DEFine the word given, using an online service
 547 def() {
 548     local arg
 549     local gap=0
 550     for arg in "$@"; do
 551         [ "${gap}" -gt 0 ] && printf "\n"
 552         gap=1
 553         printf "\e[7m%-80s\x1b[0m\n" "${arg}"
 554         curl -s "dict://dict.org/d:${arg}" | awk '
 555             { gsub(/\r$/, "") }
 556             /^151 / {
 557                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 558                 next
 559             }
 560             /^[1-9][0-9]{2} / {
 561                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 562                 next
 563             }
 564             { print; fflush() }
 565         '
 566     done | less -JMKiCRS
 567 }
 568 
 569 # dictionary-define the word given, using an online service
 570 define() {
 571     local arg
 572     local gap=0
 573     for arg in "$@"; do
 574         [ "${gap}" -gt 0 ] && printf "\n"
 575         gap=1
 576         printf "\e[7m%-80s\x1b[0m\n" "${arg}"
 577         curl -s "dict://dict.org/d:${arg}" | awk '
 578             { gsub(/\r$/, "") }
 579             /^151 / {
 580                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 581                 next
 582             }
 583             /^[1-9][0-9]{2} / {
 584                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 585                 next
 586             }
 587             { print; fflush() }
 588         '
 589     done | less -JMKiCRS
 590 }
 591 
 592 # DEcompress GZip-encoded data
 593 # degz() { zcat "$@"; }
 594 
 595 # turn JSON Lines into a proper json array
 596 dejsonl() { jq -s -M "${@:-.}"; }
 597 
 598 # delay lines from the standard-input, waiting the number of seconds given
 599 # for each line, or waiting 1 second by default
 600 # delay() {
 601 #     local seconds="${1:-1}"
 602 #     (
 603 #         IFS="$(printf "\n")"
 604 #         while read -r line; do
 605 #             sleep "${seconds}"
 606 #             printf "%s\n" "${line}"
 607 #         done
 608 #     )
 609 # }
 610 
 611 # convert lines of Space(s)-Separated Values into lines of tab-separated values
 612 dessv() {
 613     awk '
 614         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 615 
 616         {
 617             gsub(/\r$/, "")
 618             for (i = 1; i <= NF; i++) {
 619                 if (i > 1) printf "\t"
 620                 printf "%s", $i
 621             }
 622             printf "\n"; fflush()
 623         }
 624     ' "$@"
 625 }
 626 
  627 # expand tabs each into up to the number of spaces given, or 4 by default
 628 detab() { expand -t "${1:-4}"; }
 629 
 630 # ignore trailing spaces, as well as trailing carriage returns
 631 detrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
 632 
 633 # turn UTF-16 data into UTF-8
 634 deutf16() { iconv -f utf16 -t utf8 "$@"; }
 635 
 636 # DIVide 2 numbers 3 ways, including the complement
 637 div() {
 638     awk -v a="${1:-1}" -v b="${2:-1}" '
 639         BEGIN {
 640             gsub(/_/, "", a)
 641             gsub(/_/, "", b)
 642             if (a > b) { c = a; a = b; b = c }
 643             c = 1 - a / b
 644             if (0 <= c && c <= 1) printf "%f\n%f\n%f\n", a / b, b / a, c
 645             else printf "%f\n%f\n", a / b, b / a
 646             exit
 647         }'
 648 }
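     # worked example (illustrative): `div 1 4` should print 0.250000,
     # 4.000000, and 0.750000: the ratio, its inverse, and the complement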
 649 
 650 # get/fetch data from the filename or URI given; named `dog` because dogs can
 651 # `fetch` things for you
 652 # dog() {
 653 #     if [ $# -gt 1 ]; then
 654 #         printf "\e[31mdogs only have 1 mouth to fetch with\e[0m\n" >&2
 655 #         return 1
 656 #     fi
 657 #
 658 #     if [ -e "$1" ]; then
 659 #         cat "$1"
 660 #         return $?
 661 #     fi
 662 #
 663 #     case "${1:--}" in
 664 #         -) cat -;;
 665 #         file://*|https://*|http://*) curl --show-error -s "$1";;
 666 #         ftp://*|ftps://*|sftp://*) curl --show-error -s "$1";;
 667 #         dict://*|telnet://*) curl --show-error -s "$1";;
 668 #         data:*) echo "$1" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 669 #         *) curl --show-error -s "https://$1";;
 670 #     esac 2> /dev/null || {
 671 #         printf "\e[31mcan't fetch %s\e[0m\n" "${1:--}" >&2
 672 #         return 1
 673 #     }
 674 # }
 675 
 676 # emit a line with a repeating dot-like symbol in it
 677 dots() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -·-g'; }
 678 
 679 # ignore/remove all matched regexes given on all stdin lines
 680 drop() {
 681     awk '
 682         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 683         {
 684             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 685             print; fflush()
 686         }
 687     ' "${@:-\r$}"
 688 }
 689 
 690 # show the current Date and Time
 691 dt() {
 692     printf "\e[38;2;78;154;6m%s\e[0m  \e[38;2;52;101;164m%s\e[0m\n" \
 693         "$(date +'%a %b %d')" "$(date +%T)"
 694 }
 695 
 696 # show the current Date, Time, and a Calendar with the 3 `current` months
 697 dtc() {
 698     {
 699         # show the current date/time center-aligned
 700         printf "%20s\e[38;2;78;154;6m%s\e[0m  \e[38;2;52;101;164m%s\e[0m\n\n" \
 701             "" "$(date +'%a %b %d')" "$(date +%T)"
 702         # debian linux has a different `cal` app which highlights the day
 703         if [ -e "/usr/bin/ncal" ]; then
 704             # fix debian/ncal's weird way to highlight the current day
 705             ncal -C -3 | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
 706         else
 707             cal -3
 708         fi
 709     } | less -JMKiCRS
 710 }
 711 
 712 # quick alias for `echo`
 713 e() { echo "$@"; }
 714 
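     # expand tabs each into up to 4 spaces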
 715 e4() { expand -t 4 "$@"; }
 716 
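     # expand tabs each into up to 8 spaces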
 717 e8() { expand -t 8 "$@"; }
 718 
 719 # Evaluate Awk expression
 720 ea() {
 721     local expr="${1:-0}"
 722     [ $# -gt 0 ] && shift
 723     awk "BEGIN { print ${expr}; exit }" "$@"
 724 }
 725 
 726 # EDit RUN shell commands, using an interactive editor
 727 edrun() { . <( micro -readonly true -filetype shell | leak --inv ); }
 728 
 729 # Extended-mode Grep, enabling its full regex syntax
 730 eg() { grep -E --line-buffered "$@"; }
 731 
 732 # Extended Grep, Recursive Interactive and Plain
 733 # egrip() { ugrep -r -Q --color=never -E "$@"; }
 734 
 735 # show all empty files in a folder, digging recursively
 736 emptyfiles() {
 737     local arg
 738     for arg in "${@:-.}"; do
 739         if [ ! -d "${arg}" ]; then
 740             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 741             return 1
 742         fi
 743         stdbuf -oL find "${arg}" -type f -empty
 744     done
 745 }
 746 
 747 # show all empty folders in a folder, digging recursively
 748 emptyfolders() {
 749     local arg
 750     for arg in "${@:-.}"; do
 751         if [ ! -d "${arg}" ]; then
 752             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 753             return 1
 754         fi
 755         stdbuf -oL find "${arg}" -type d -empty
 756     done
 757 }
 758 
 759 # Evaluate Nodejs expression
 760 # en() {
 761 #     local expr="${1:-null}"
 762 #     expr="$(echo "${expr}" | sed 's-\\-\\\\-g; s-`-\`-g')"
 763 #     node -e "console.log(${expr})" | sed 's-\x1b\[[^A-Za-z]+[A-Za-z]--g'
 764 # }
 765 
 766 # Evaluate Python expression
 767 ep() { python -c "print(${1:-None})"; }
 768 
 769 # Extended Plain Interactive Grep
 770 epig() { ugrep --color=never -Q -E "$@"; }
 771 
 772 # Extended Plain Recursive Interactive Grep
  773 eprig() { ugrep -r --color=never -Q -E "$@"; }
 774 
 775 # Evaluate Ruby expression
 776 # er() { ruby -e "puts ${1:-nil}"; }
 777 
 778 # Edit Run shell commands, using an interactive editor
 779 er() { . <( micro -readonly true -filetype shell | leak --inv ); }
 780 
 781 # ignore/remove all matched regexes given on all stdin lines
 782 erase() {
 783     awk '
 784         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 785         {
 786             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 787             print; fflush()
 788         }
 789     ' "${@:-\r$}"
 790 }
 791 
 792 # Editor Read-Only
 793 ero() { micro -readonly true "$@"; }
 794 
 795 # Extended-mode Sed, enabling its full regex syntax
 796 es() { sed -E -u "$@"; }
 797 
  798 # Expand Tabs each into up to the number of spaces given, or 4 by default
 799 et() { expand -t "${1:-4}"; }
 800 
 801 # convert EURos into CAnadian Dollars, using the latest official exchange
 802 # rates from the bank of canada; during weekends, the latest rate may be
 803 # from a few days ago; the default amount of euros to convert is 1, when
 804 # not given
 805 eur2cad() {
 806     local site='https://www.bankofcanada.ca/valet/observations/group'
 807     local csv_rates="${site}/FX_RATES_DAILY/csv"
 808     local url
 809     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
 810     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
 811         /EUR/ { for (i = 1; i <= NF; i++) if($i ~ /EUR/) j = i }
 812         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
 813 }
 814 
 815 # EValuate AWK expression
 816 evawk() {
 817     local expr="${1:-0}"
 818     [ $# -gt 0 ] && shift
 819     awk "BEGIN { print ${expr}; exit }" "$@"
 820 }
 821 
 822 # get various currency EXchange RATES
 823 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/$1"; }
 824 
 825 # get various currency EXchange RATES
 826 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-USD}"; }
 827 
 828 # get various currency EXchange RATES
 829 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-EUR}"; }
 830 
 831 # get various currency EXchange RATES
 832 exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-CAD}"; }
 833 
 834 # convert fahrenheit into celsius
 835 fahrenheit() {
 836     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' |
 837         awk '/./ { printf "%.2f\n", ($0 - 32) * 5.0/9.0 }'
 838 }
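     # worked example (illustrative): `fahrenheit 212` should print 100.00,
     # and `fahrenheit 32` should print 0.00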
 839 
 840 # Flushed AWK
 841 fawk() { stdbuf -oL awk "$@"; }
 842 
  843 # fetch/web-request all URIs given, using protocol HTTPS when none is given
 844 fetch() {
 845     local a
 846     for a in "$@"; do
 847         case "$a" in
 848             file://*|https://*|http://*) curl --show-error -s "$a";;
 849             ftp://*|ftps://*|sftp://*) curl --show-error -s "$a";;
 850             dict://*|telnet://*) curl --show-error -s "$a";;
 851             data:*) echo "$a" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 852             *) curl --show-error -s "https://$a";;
 853         esac
 854     done
 855 }
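     # example (illustrative): `fetch example.com/data.json` should quietly
     # request https://example.com/data.json, since no protocol was given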
 856 
 857 # run the Fuzzy Finder (fzf) in multi-choice mode, with custom keybindings
 858 ff() { fzf -m --bind ctrl-a:select-all,ctrl-space:toggle "$@"; }
 859 
 860 # show all files in a folder, digging recursively
 861 files() {
 862     local arg
 863     for arg in "${@:-.}"; do
 864         if [ ! -d "${arg}" ]; then
 865             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 866             return 1
 867         fi
 868         stdbuf -oL find "${arg}" -type f
 869     done
 870 }
 871 
 872 # recursively find all files with fewer bytes than the number given
 873 filesunder() {
 874     local n
 875     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 876     [ $# -gt 0 ] && shift
 877 
 878     local arg
 879     for arg in "${@:-.}"; do
 880         if [ ! -d "${arg}" ]; then
 881             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 882             return 1
 883         fi
 884         stdbuf -oL find "${arg}" -type f -size -"$n"c
 885     done
 886 }
 887 
 888 # get the first n lines, or 1 by default
 889 first() { head -n "${1:-1}" "${2:--}"; }
 890 
 891 # limit data up to the first n bytes
 892 firstbytes() { head -c "$1" "${2:--}"; }
 893 
 894 # get the first n lines, or 1 by default
 895 firstlines() { head -n "${1:-1}" "${2:--}"; }
 896 
 897 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
 898 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
 899 # and ensuring each input's last line ends with a line-feed
 900 fixlines() {
 901     awk '
 902         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 903         { gsub(/\r$/, ""); print; fflush() }
 904     ' "$@"
 905 }
 906 
 907 # FLushed AWK
 908 # flawk() { stdbuf -oL awk "$@"; }
 909 
  910 # First Line AWK emits the first line as is; its first argument is injected
  911 # into the AWK script as a condition/action applied to all later lines, and
  912 # any remaining arguments are passed to `awk` as given
 913 flawk() {
 914     local code="${1:-1}"
 915     [ $# -gt 0 ] && shift
 916     stdbuf -oL awk "NR == 1 { print; fflush(); next } ${code}" "$@"
 917 }
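     # example (illustrative): `ls -l | flawk '$5 > 10000'` should keep the
     # first line, then only lines whose 5th field (the size) exceeds 10000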
 918 
 919 # Faint LEAK emits/tees input both to stdout and stderr, coloring gray what
 920 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
 921 # involving several steps
 922 fleak() {
 923     awk '
 924         {
 925             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
 926             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0 > "/dev/stderr"
 927             print; fflush()
 928         }
 929     ' "$@"
 930 }
 931 
 932 # try to run the command given using line-buffering for its (standard) output
 933 flushlines() { stdbuf -oL "$@"; }
 934 
 935 # show all folders in a folder, digging recursively
 936 folders() {
 937     local arg
 938     for arg in "${@:-.}"; do
 939         if [ ! -d "${arg}" ]; then
 940             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 941             return 1
 942         fi
 943         stdbuf -oL find "${arg}" -type d | awk '!/^\.$/ { print; fflush() }'
 944     done
 945 }
 946 
 947 # start from the line number given, skipping all previous ones
 948 fromline() { tail -n +"${1:-1}" "${2:--}"; }
 949 
 950 # convert FeeT into meters
 951 ft() {
 952     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 953         awk '/./ { printf "%.2f\n", 0.3048 * $0; fflush() }'
 954 }
 955 
 956 # convert FeeT² (squared) into meters²
 957 ft2() {
 958     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 959         awk '/./ { printf "%.2f\n", 0.09290304 * $0 }'
 960 }
 961 
 962 # Get/fetch data from the filenames/URIs given; uses my script `get`
 963 # g() { get "$@"; }
 964 
 965 # run `grep` in extended-regex mode, enabling its full regex syntax
 966 # g() { grep -E --line-buffered "$@"; }
 967 
 968 # convert GALlons into liters
 969 gal() {
 970     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 971         awk '/./ { printf "%.2f\n", 3.785411784 * $0; fflush() }'
 972 }
 973 
 974 # convert binary GigaBytes into bytes
 975 gb() {
 976     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 977         awk '/./ { printf "%.4f\n", 1073741824 * $0; fflush() }' |
 978         sed 's-\.00*$--'
 979 }
 980 
 981 # glue/stick together various lines, only emitting a line-feed at the end; an
 982 # optional argument is the output-item-separator, which is empty by default
 983 glue() {
 984     local sep="${1:-}"
 985     [ $# -gt 0 ] && shift
 986     awk -v sep="${sep}" '
 987         NR > 1 { printf "%s", sep }
 988         { gsub(/\r/, ""); printf "%s", $0; fflush() }
 989         END { if (NR > 0) print ""; fflush() }
 990     ' "$@"
 991 }
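     # example (illustrative): `printf 'a\nb\nc\n' | glue ', '` should emit
     # the single line `a, b, c`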
 992 
 993 # GO Build Stripped: a common use-case for the go compiler
 994 gobs() { go build -ldflags "-s -w" -trimpath "$@"; }
 995 
 996 # GO DEPendencieS: show all dependencies in a go project
 997 godeps() { go list -f '{{ join .Deps "\n" }}' "$@"; }
 998 
 999 # GO IMPortS: show all imports in a go project
1000 goimps() { go list -f '{{ join .Imports "\n" }}' "$@"; }
1001 
1002 # go to the folder picked using an interactive TUI; uses my script `bf`
1003 goto() {
1004     local where
1005     where="$(bf "${1:-.}")"
1006     if [ $? -ne 0 ]; then
1007         return 0
1008     fi
1009 
1010     where="$(realpath "${where}")"
1011     if [ ! -d "${where}" ]; then
1012         where="$(dirname "${where}")"
1013     fi
1014     cd "${where}" || return
1015 }
1016 
1017 # GRayed-out lines with AWK
1018 grawk() {
1019     local cond="${1:-1}"
1020     [ $# -gt 0 ] && shift
1021     awk "${cond}"' {
1022             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m")
1023             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush()
1024             next
1025         }
1026         { print; fflush() }
1027     ' "$@"
1028 }
1029 
1030 # Style lines using a GRAY-colored BACKground
1031 grayback() {
1032     awk '
1033         {
1034             gsub(/\x1b\[0m/, "\x1b[0m\x1b[48;2;218;218;218m")
1035             printf "\x1b[48;2;218;218;218m%s\x1b[0m\n", $0; fflush()
1036         }
1037     ' "$@"
1038 }
1039 
1040 # Grep, Recursive Interactive and Plain
1041 # grip() { ugrep -r -Q --color=never -E "$@"; }
1042 
1043 # Global extended regex SUBstitute, using the AWK function of the same name:
1044 # arguments are used as regex/replacement pairs, in that order
1045 gsub() {
1046     awk '
1047         BEGIN {
1048             for (i = 1; i < ARGC; i++) {
1049                 args[++n] = ARGV[i]
1050                 delete ARGV[i]
1051             }
1052         }
1053         {
1054             for (i = 1; i <= n; i += 2) gsub(args[i], args[i + 1])
1055             print; fflush()
1056         }
1057     ' "$@"
1058 }
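     # example (illustrative): `echo 'a-b c' | gsub - _ ' ' .` should emit
     # `a_b.c`, applying each regex/replacement pair in the order given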
1059 
1060 # show Help laid out on 2 side-by-side columns; uses my script `bsbs`
1061 h2() { naman "$@" | bsbs 2; }
1062 
1063 # Highlight (lines) with AWK
1064 hawk() {
1065     local cond="${1:-1}"
1066     [ $# -gt 0 ] && shift
1067     awk '
1068         { low = lower = tolower($0) }
1069         '"${cond}"' {
1070             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1071             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1072             next
1073         }
1074         { print; fflush() }
1075     ' "$@"
1076 }
1077 
1078 # play a heartbeat-like sound lasting the number of seconds given, or for 1
1079 # second by default; uses my script `waveout`
1080 heartbeat() {
1081     local a='sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1])'
1082     local b='((12, u), (8, (u-0.25)%1))'
1083     local f="sum($a for v in $b) / 2"
1084     # local f='sum(sin(10*tau*exp(-20*v))*exp(-2*v) for v in (u, (u-0.25)%1))/2'
1085     # local f='sum(sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1]) for v in ((12, u), (8, (u-0.25)%1)))/2'
1086     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1087 }
1088 
1089 # Highlighted-style ECHO
1090 hecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1091 
1092 # show each byte as a pair of HEXadecimal (base-16) symbols
1093 hexify() {
1094     cat "$@" | od -x -A n |
1095         awk '{ gsub(/ +/, ""); printf "%s", $0; fflush() } END { printf "\n" }'
1096 }
1097 
1098 # HIghlighted-style ECHO
1099 hiecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1100 
1101 # highlight lines
1102 highlight() {
1103     awk '
1104         {
1105             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1106             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1107         }
1108     ' "$@"
1109 }
1110 
1111 # HIghlight LEAK emits/tees input both to stdout and stderr, highlighting what
1112 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
1113 # involving several steps
1114 hileak() {
1115     awk '
1116         {
1117             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
1118             printf "\x1b[7m%s\x1b[0m\n", $0 > "/dev/stderr"
1119             print; fflush()
1120         }
1121     ' "$@"
1122 }
1123 
1124 # highlight lines
1125 hilite() {
1126     awk '
1127         {
1128             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1129             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1130         }
1131     ' "$@"
1132 }
1133 
1134 # Help Me Remember my custom shell commands
1135 hmr() {
1136     local cmd="bat"
1137     # debian linux uses a different name for the `bat` app
1138     if [ -e "/usr/bin/batcat" ]; then
1139         cmd="batcat"
1140     fi
1141 
1142     "$cmd" \
1143         --style=plain,header,numbers --theme='Monokai Extended Light' \
1144         --wrap=never --color=always "$(which clam)" |
1145             sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' | less -JMKiCRS
1146 }
1147 
1148 # convert seconds into a colon-separated Hours-Minutes-Seconds triple
1149 hms() {
1150     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | awk '/./ {
1151         x = $0
1152         h = (x - x % 3600) / 3600
1153         m = (x % 3600) / 60
1154         s = x % 60
1155         printf "%02d:%02d:%05.2f\n", h, m, s; fflush()
1156     }'
1157 }
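     # worked example (illustrative): `hms 3661.5` should print 01:01:01.50,
     # since 3661.5 seconds are 1 hour, 1 minute, and 1.5 seconds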
1158 
1159 # find all hyperlinks inside HREF attributes in the input text
1160 href() {
1161     awk '
1162         BEGIN { e = "href=\"[^\"]+\"" }
1163         {
1164             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1165                 print substr(s, RSTART + 6, RLENGTH - 7); fflush()
1166             }
1167         }
1168     ' "$@"
1169 }
1170 
1171 # Index all lines starting from 0, using a tab right after each line number
1172 # i() {
1173 #     local start="${1:-0}"
1174 #     [ $# -gt 0 ] && shift
1175 #     nl -b a -w 1 -v "${start}" "$@"
1176 # }
1177 
1178 # Index all lines starting from 0, using a tab right after each line number
1179 i() { stdbuf -oL nl -b a -w 1 -v 0 "$@"; }
1180 
1181 # avoid/ignore lines which case-insensitively match any of the regexes given
1182 iavoid() {
1183     awk '
1184         BEGIN {
1185             if (IGNORECASE == "") {
1186                 m = "this variant of AWK lacks case-insensitive regex-matching"
1187                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1188                 exit 125
1189             }
1190             IGNORECASE = 1
1191 
1192             for (i = 1; i < ARGC; i++) {
1193                 e[i] = ARGV[i]
1194                 delete ARGV[i]
1195             }
1196         }
1197 
1198         {
1199             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
1200             print; fflush(); got++
1201         }
1202 
1203         END { exit(got == 0) }
1204     ' "${@:-^\r?$}"
1205 }
1206 
1207 # case-Insensitively DEDUPlicate prevents lines from appearing more than once
1208 idedup() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1209 
1210 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1211 idrop() {
1212     awk '
1213         BEGIN {
1214             if (IGNORECASE == "") {
1215                 m = "this variant of AWK lacks case-insensitive regex-matching"
1216                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1217                 exit 125
1218             }
1219             IGNORECASE = 1
1220 
1221             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1222         }
1223 
1224         {
1225             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1226             print; fflush()
1227         }
1228     ' "${@:-\r$}"
1229 }
1230 
1231 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1232 ierase() {
1233     awk '
1234         BEGIN {
1235             if (IGNORECASE == "") {
1236                 m = "this variant of AWK lacks case-insensitive regex-matching"
1237                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1238                 exit 125
1239             }
1240             IGNORECASE = 1
1241 
1242             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1243         }
1244 
1245         {
1246             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1247             print; fflush()
1248         }
1249     ' "${@:-\r$}"
1250 }
1251 
1252 # ignore command in a pipe: this allows quick re-editing of pipes, while
1253 # still leaving signs of previously-used steps, as a memo
1254 ignore() { cat; }
1255 
1256 # only keep lines which case-insensitively match any of the regexes given
1257 imatch() {
1258     awk '
1259         BEGIN {
1260             if (IGNORECASE == "") {
1261                 m = "this variant of AWK lacks case-insensitive regex-matching"
1262                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1263                 exit 125
1264             }
1265             IGNORECASE = 1
1266 
1267             for (i = 1; i < ARGC; i++) {
1268                 e[i] = ARGV[i]
1269                 delete ARGV[i]
1270             }
1271         }
1272 
1273         {
1274             for (i = 1; i < ARGC; i++) {
1275                 if ($0 ~ e[i]) {
1276                     print; fflush()
1277                     got++
1278                     next
1279                 }
1280             }
1281         }
1282 
1283         END { exit(got == 0) }
1284     ' "${@:-[^\r]}"
1285 }
1286 
1287 # start each non-empty line with n extra spaces
1288 indent() {
1289     awk '
1290         BEGIN {
1291             n = ARGV[1] + 0
1292             delete ARGV[1]
1293             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
1294         }
1295 
1296         /^\r?$/ { print ""; fflush(); next }
1297         { gsub(/\r$/, ""); printf(fmt, "", $0); fflush() }
1298     ' "$@"
1299 }
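     # example (illustrative): `printf 'a\n\nb\n' | indent 4` should start
     # the non-empty lines with 4 extra spaces, leaving the empty line as is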
1300 
1301 # listen to INTENSE streaming radio
1302 intense() {
1303     printf "streaming \e[7mIntense Radio\e[0m\n"
1304     mpv --quiet https://secure.live-streams.nl/flac.flac
1305 }
1306 
1307 # show public-IP-related INFOrmation
1308 # ipinfo() { curl -s ipinfo.io; }
1309 
1310 # show public-IP-related INFOrmation
1311 ipinfo() { curl -s ipinfo.io | jq; }
1312 
1313 # emit each word-like item from each input line on its own line; when a file
1314 # has tabs on its first line, items are split using tabs alone, which allows
1315 # items to have spaces in them
1316 items() {
1317     awk '
1318         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
1319         { gsub(/\r$/, ""); for (i = 1; i <= NF; i++) print $i; fflush() }
1320     ' "$@"
1321 }
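     # example (illustrative): `printf 'a b\tc d\n' | items` should emit 2
     # lines, `a b` and `c d`, since the first line has a tab in it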
1322 
1323 # case-insensitively deduplicate lines, keeping them in their original order:
1324 # the checking/matching is case-insensitive, but each first match is output
1325 # exactly as is
1326 iunique() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1327 
1328 # shrink/compact Json data, allowing an optional filepath
1329 # j0() { python -m json.tool --compact "${1:--}"; }
1330 
1331 # shrink/compact Json using the `jq` app, allowing an optional filepath, and
1332 # even an optional transformation formula after that
1333 # j0() { jq -c -M "${2:-.}" "${1:--}"; }
1334 
1335 # show Json data on multiple lines, using 2 spaces for each indentation level,
1336 # allowing an optional filepath
1337 # j2() { python -m json.tool --indent 2 "${1:--}"; }
1338 
1339 # show Json data on multiple lines, using 2 spaces for each indentation level,
1340 # allowing an optional filepath, and even an optional transformation formula
1341 # after that
1342 # j2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1343 
1344 # listen to streaming JAZZ music
1345 jazz() {
1346     printf "streaming \e[7mSmooth Jazz Instrumental\e[0m\n"
1347     # mpv https://stream.zeno.fm/00rt0rdm7k8uv
1348     mpv --quiet https://stream.zeno.fm/00rt0rdm7k8uv
1349 }
1350 
1351 # show a `dad` JOKE from the web, sometimes even a very funny one
1352 # joke() {
1353 #     curl -s https://icanhazdadjoke.com | fold -s | sed -E 's- *\r?$--'
1354 #     # plain-text output from previous cmd doesn't end with a line-feed
1355 #     printf "\n"
1356 # }
1357 
1358 # show a `dad` JOKE from the web, sometimes even a very funny one
1359 joke() {
1360     curl --show-error -s https://icanhazdadjoke.com | fold -s |
1361         awk '{ gsub(/ *\r?$/, ""); print }'
1362 }
1363 
1364 # shrink/compact JSON data, allowing an optional filepath
1365 # json0() { python -m json.tool --compact "${1:--}"; }
1366 
1367 # shrink/compact JSON using the `jq` app, allowing an optional filepath, and
1368 # even an optional transformation formula after that
1369 json0() { jq -c -M "${2:-.}" "${1:--}"; }
1370 
1371 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1372 # allowing an optional filepath
1373 # json2() { python -m json.tool --indent 2 "${1:--}"; }
1374 
1375 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1376 # allowing an optional filepath, and even an optional transformation formula
1377 # after that
1378 json2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1379 
1380 # turn JSON Lines into a proper JSON array
1381 jsonl2json() { jq -s -M "${@:-.}"; }
1382 
1383 # emit the given number of random/junk bytes, or 1024 junk bytes by default
1384 junk() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" /dev/urandom; }
1385 
1386 # only keep the file-extension part from lines ending with file-extensions
1387 # justext() {
1388 #     awk '
1389 #         !/^\./ && /\./ { gsub(/^.+\.+/, ""); printf ".%s\n", $0; fflush() }
1390 #     ' "$@"
1391 # }
1392 
1393 # only keep the file-extension part from lines ending with file-extensions
1394 justext() {
1395     awk '
1396         !/^\./ && /\./ {
1397             if (match($0, /((\.[A-Za-z0-9]+)+) *\r?$/)) {
1398                 print substr($0, RSTART, RLENGTH); fflush()
1399             }
1400         }
1401     ' "$@"
1402 }
1403 
1404 # only keep lines ending with a file-extension of any popular picture format
1405 justpictures() {
1406     awk '
1407         /.\.(bmp|gif|heic|ico|jfif|jpe?g|png|svg|tiff?|webp) *\r?$/ {
1408             gsub(/ *\r?$/, ""); print; fflush()
1409         }
1410     ' "$@"
1411 }
1412 
1413 # only keep lines ending with a file-extension of any popular sound format
1414 justsounds() {
1415     awk '
1416         /.\.(aac|aif[cf]?|au|flac|m4a|m4b|mp[23]|ogg|snd|wav|wma) *\r?$/ {
1417             gsub(/ *\r?$/, ""); print; fflush()
1418         }
1419     ' "$@"
1420 }
1421 
1422 # only keep lines ending with a file-extension of any popular video format
1423 justvideos() {
1424     awk '
1425         /.\.(avi|mkv|mov|mp4|mpe?g|ogv|webm|wmv) *\r?$/ {
1426             gsub(/ *\r?$/, ""); print; fflush()
1427         }
1428     ' "$@"
1429 }
1430 
1431 # convert binary KiloBytes into bytes
1432 kb() {
1433     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1434         awk '/./ { printf "%.2f\n", 1024 * $0; fflush() }' |
1435         sed 's-\.00*$--'
1436 }
1437 
1438 # run `less`, showing line numbers, among other settings
1439 l() { less -JMKNiCRS "$@"; }
1440 
1441 # Like A Book groups lines as 2 side-by-side pages, the same way books
1442 # do it; uses my script `book`
1443 lab() { book "$(($(tput lines) - 1))" "$@" | less -JMKiCRS; }
1444 
1445 # find the LAN (local-area network) IP address for this device
1446 lanip() { hostname -I; }
1447 
1448 # Line xARGS: `xargs` using line separators, which handles filepaths
1449 # with spaces, as long as the standard input has 1 path per line
1450 # largs() { tr -d '\r' | tr '\n' '\000' xargs -0 "$@"; }
1451 
1452 # Line xARGS: `xargs` using line separators, which handles filepaths
1453 # with spaces, as long as the standard input has 1 path per line
1454 largs() {
1455     awk -v ORS='\000' '
1456         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1457         { gsub(/\r$/, ""); print; fflush() }
1458     ' | xargs -0 "$@"
1459 }
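     # example (illustrative): `files | largs wc -c` should run `wc -c` on
     # all the files found, even when their paths have spaces in them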
1460 
1461 # get the last n lines, or 1 by default
1462 last() { tail -n "${1:-1}" "${2:--}"; }
1463 
1464 # get up to the last given number of bytes
1465 lastbytes() { tail -c "${1:-1}" "${2:--}"; }
1466 
1467 # get the last n lines, or 1 by default
1468 lastlines() { tail -n "${1:-1}" "${2:--}"; }
1469 
1470 # turn UTF-8 into its latin-like subset, where variants of latin letters stay
1471 # as given, and where all other symbols become question marks, one question
1472 # mark for each code-point byte
1473 latinize() {
1474     iconv -f utf-8 -t latin-1//translit "$@" | iconv -f latin-1 -t utf-8
1475 }
1476 
1477 # Lowercased (lines) AWK
1478 lawk() {
1479     local code="${1:-1}"
1480     [ $# -gt 0 ] && shift
1481     awk "
1482         {
1483             line = orig = original = \$0
1484             low = lower = tolower(\$0)
1485             \$0 = lower
1486         }
1487         ${code}
1488         { fflush() }
1489     " "$@";
1490 }
1491 
1492 # convert pounds (LB) into kilograms
1493 lb() {
1494     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1495         awk '/./ { printf "%.2f\n", 0.45359237 * $0; fflush() }'
1496 }
1497 
1498 # turn the first n space-separated fields on each line into tab-separated
1499 # ones; this behavior is useful to make the output of many cmd-line tools
1500 # into TSV, since filenames are usually the last fields, and these may
1501 # contain spaces which aren't meant to be split into different fields
1502 leadtabs() {
1503     local n="${1:-80}"
1504     local cmd
1505     cmd="$([ "$n" -gt 0 ] && printf "%${n}s\n" "")"
1506     cmd="s-^ *--; s- *\\r?\$--; $(echo "${cmd}" | sed 's/ /s- +-\\t-1;/g')"
1507     sed -u -E "${cmd}"
1508 }
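     # example (illustrative): `ls -l | leadtabs 8` should turn the first 8
     # space-runs on each line into tabs, keeping trailing filenames whole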
1509 
1510 # run `less`, showing line numbers, among other settings
1511 least() { less -JMKNiCRS "$@"; }
1512 
1513 # limit stops at the first n bytes, or 1024 bytes by default
1514 limit() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" "${2:--}"; }
1515 
1516 # Less with Header runs `less` with line numbers, ANSI styles, no line-wraps,
1517 # and using the first n lines as a sticky-header (1 by default), so they
1518 # always show on top
1519 lh() {
1520     local n="${1:-1}"
1521     [ $# -gt 0 ] && shift
1522     less --header="$n" -JMKNiCRS "$@"
1523 }
1524 
1525 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
1526 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
1527 # and ensuring each input's last line ends with a line-feed
1528 lines() {
1529     awk '
1530         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1531         { gsub(/\r$/, ""); print; fflush() }
1532     ' "$@"
1533 }
1534 
1535 # regroup adjacent lines into n-item tab-separated lines
1536 lineup() {
1537     local n="${1:-0}"
1538     [ $# -gt 0 ] && shift
1539 
1540     if [ "$n" -le 0 ]; then
1541         awk '
1542             NR > 1 { printf "\t" }
1543             { printf "%s", $0; fflush() }
1544             END { if (NR > 0) print "" }
1545         ' "$@"
1546         return $?
1547     fi
1548 
1549     awk -v n="$n" '
1550         NR % n != 1 && n > 1 { printf "\t" }
1551         { printf "%s", $0; fflush() }
1552         NR % n == 0 { print ""; fflush() }
1553         END { if (NR % n != 0) print "" }
1554     ' "$@"
1555 }
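     # example (illustrative): `printf '1\n2\n3\n4\n' | lineup 2` should emit
     # 2 tab-separated lines: `1<TAB>2` and `3<TAB>4`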
1556 
1557 # find all hyperLINKS (https:// and http://) in the input text
1558 links() {
1559     awk '
1560         BEGIN { e = "https?://[A-Za-z0-9+_.:%-]+(/[A-Za-z0-9+_.%/,#?&=-]*)*" }
1561         {
1562             # match all links in the current line
1563             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1564                 print substr(s, RSTART, RLENGTH); fflush()
1565             }
1566         }
1567     ' "$@"
1568 }
1569 
1570 # List files, using the `Long` option
1571 # ll() { ls -l "$@"; }
1572 
1573 # LOAD data from the filename or URI given; uses my script `get`
1574 load() { get "$@"; }
1575 
1576 # LOwercase line, check (awk) COndition: on each success, the original line
1577 # is output with its original letter-casing, as its lower-cased version is
1578 # only a convenience meant for the condition
1579 loco() {
1580     local cond="${1:-1}"
1581     [ $# -gt 0 ] && shift
1582     awk "
1583         {
1584             line = orig = original = \$0
1585             low = lower = tolower(\$0)
1586             \$0 = lower
1587         }
1588         ${cond} { print line; fflush() }
1589     " "$@"
1590 }
1591 
1592 # LOcal SERver webserves files in a folder as localhost, using the port
1593 # number given, or port 8080 by default
1594 loser() {
1595     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
1596     python3 -m http.server "${1:-8080}" -d "${2:-.}"
1597 }
1598 
1599 # LOWercase all ASCII symbols
1600 low() { awk '{ print tolower($0); fflush() }' "$@"; }
1601 
1602 # LOWERcase all ASCII symbols
1603 lower() { awk '{ print tolower($0); fflush() }' "$@"; }
1604 
1605 # Live/Line-buffered RipGrep ensures results show/pipe up immediately
1606 lrg() { rg --line-buffered "$@"; }
1607 
1608 # Listen To Youtube
1609 lty() {
1610     local url
1611     # some youtube URIs end with extra playlist/tracker parameters
1612     url="$(echo "$1" | sed 's-&.*--')"
1613     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
1614 }
1615 
1616 # only keep lines which match any of the regexes given
1617 match() {
1618     awk '
1619         BEGIN {
1620             for (i = 1; i < ARGC; i++) {
1621                 e[i] = ARGV[i]
1622                 delete ARGV[i]
1623             }
1624         }
1625 
1626         {
1627             for (i = 1; i < ARGC; i++) {
1628                 if ($0 ~ e[i]) {
1629                     print; fflush()
1630                     got++
1631                     next
1632                 }
1633             }
1634         }
1635 
1636         END { exit(got == 0) }
1637     ' "${@:-[^\r]}"
1638 }
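# illustrative example for `match`: lines matching any of the regexes given
# are kept, and the exit code says whether anything matched at all
#   printf 'cat\ndog\ncow\n' | match '^ca' 'w$'    # emits "cat" and "cow"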
1639 
1640 # MAX Width truncates lines to the number of symbols/bytes given, or to 80
1641 # by default; output lines end with an ANSI reset-code, in case input
1642 # lines use ANSI styles
1643 maxw() {
1644     local maxwidth="${1:-80}"
1645     [ $# -gt 0 ] && shift
1646     awk -v maxw="${maxwidth}" '
1647         {
1648             gsub(/\r$/, "")
1649             printf("%s\x1b[0m\n", substr($0, 1, maxw)); fflush()
1650         }
1651     ' "$@"
1652 }
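# illustrative example for `maxw`: only the first n symbols of each line survive
#   echo 0123456789 | maxw 4    # emits "0123", ending with an ANSI reset-code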
1653 
1654 # convert binary MegaBytes into bytes
1655 mb() {
1656     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1657         awk '/./ { printf "%.2f\n", 1048576 * $0; fflush() }' |
1658         sed 's-\.00*$--'
1659 }
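# illustrative examples for `mb`: underscores in numbers are ignored, and
# trailing decimal zeros are trimmed from the results
#   mb 1        # emits 1048576
#   mb 1_024    # emits 1073741824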
1660 
1661 # Multi-Core MAKE runs `make` using all cores
1662 mcmake() { make -j "$(nproc)" "$@"; }
1663 
1664 # Multi-Core MaKe runs `make` using all cores
1665 mcmk() { make -j "$(nproc)" "$@"; }
1666 
1667 # merge stderr into stdout, without any ugly keyboard-dancing
1668 # merrge() { "$@" 2>&1; }
1669 
1670 # convert MIles into kilometers
1671 mi() {
1672     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1673         awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }'
1674 }
1675 
1676 # convert MIles² (squared) into kilometers²
1677 mi2() {
1678     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1679         awk '/./ { printf "%.2f\n", 2.5899881103360 * $0 }'
1680 }
1681 
1682 # Make In Folder
1683 mif() {
1684     local code
1685     pushd "${1:-.}" > /dev/null || return
1686     [ $# -gt 0 ] && shift
1687     make "$@"
1688     code=$?
1689     popd > /dev/null || return "${code}"
1690     return "${code}"
1691 }
1692 
1693 # Media INFO
1694 # minfo() { mediainfo "$@" | less -JMKiCRS; }
1695 
1696 # Media INFO
1697 # minfo() { ffprobe "$@" |& less -JMKiCRS; }
1698 
1699 # run `make`
1700 mk() { make "$@"; }
1701 
1702 # convert Miles Per Hour into kilometers per hour
1703 mph() {
1704     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1705         awk '/./ { printf "%.2f\n", 1.609344 * $0 }'
1706 }
1707 
1708 # Number all lines, using a tab right after each line number
1709 # n() {
1710 #     local start="${1:-1}"
1711 #     [ $# -gt 0 ] && shift
1712 #     nl -b a -w 1 -v "${start}" "$@"
1713 # }
1714 
1715 # Number all lines, using a tab right after each line number
1716 n() { stdbuf -oL nl -b a -w 1 -v 1 "$@"; }
1717 
1718 # NArrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1719 naman() {
1720     local w
1721     w="$(tput cols)"
1722     if [ "$w" -gt 100 ]; then
1723         w="$((w / 2 - 1))"
1724     fi
1725     MANWIDTH="$w" man "$@"
1726 }
1727 
1728 # Not AND sorts its 2 inputs, then finds lines not in common
1729 nand() {
1730     # comm -3 <(sort "$1") <(sort "$2")
1731     # dash doesn't support the process-sub syntax
1732     (sort "$1" | (sort "$2" | (comm -3 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
1733 }
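# illustrative example for `nand`, with 2 hypothetical filenames: the /dev/fd
# redirections above emulate bash's process-substitution, feeding both sorted
# streams to `comm`, which then drops the lines common to both
#   nand old-list.txt new-list.txt    # lines found in only one of the 2 files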
1734 
1735 # Nice Byte Count, using my scripts `nn` and `cext`
1736 nbc() { wc -c "$@" | nn --gray | cext; }
1737 
1738 # listen to streaming NEW WAVE music
1739 newwave() {
1740     printf "streaming \e[7mNew Wave radio\e[0m\n"
1741     mpv --quiet https://puma.streemlion.com:2910/stream
1742 }
1743 
1744 # NIce(r) COlumns makes the output of commands which start with a header
1745 # line easier to read; uses my script `nn`
1746 nico() {
1747     awk '
1748         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1749         { printf "%5d  %s\n", NR - 1, $0; fflush() }
1750     ' "$@" | nn --gray | less -JMKiCRS
1751 }
1752 
1753 # emit nothing to output and/or discard everything from input
1754 nil() {
1755     if [ $# -gt 0 ]; then
1756         "$@" > /dev/null
1757     else
1758         cat < /dev/null
1759     fi
1760 }
1761 
1762 # pipe-run my scripts `nj` (Nice Json) and `nn` (Nice Numbers)
1763 njnn() { nj "$@" | nn --gray; }
1764 
1765 # Narrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1766 nman() {
1767     local w
1768     w="$(tput cols)"
1769     if [ "$w" -gt 100 ]; then
1770         w="$((w / 2 - 1))"
1771     fi
1772     MANWIDTH="$w" man "$@"
1773 }
1774 
1775 # convert Nautical MIles into kilometers
1776 nmi() {
1777     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1778         awk '/./ { printf "%.2f\n", 1.852 * $0; fflush() }'
1779 }
1780 
1781 # NO (standard) ERRor ignores stderr, without any ugly keyboard-dancing
1782 # noerr() { "$@" 2> /dev/null; }
1783 
1784 # play a white-noise sound lasting the number of seconds given, or for 1
1785 # second by default; uses my script `waveout`
1786 noise() { waveout "${1:-1}" "${2:-0.05} * random()" | mpv --really-quiet -; }
1787 
1788 # ignore trailing spaces, as well as trailing carriage returns
1789 notrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
1790 
1791 # show the current date and time
1792 now() { date +'%Y-%m-%d %H:%M:%S'; }
1793 
1794 # Nice Processes shows/lists all current processes; uses my script `nn`
1795 np() {
1796     local res
1797     local code
1798     # res="$(ps "${@:-auxf}")"
1799     res="$(ps "${@:-aux}")"
1800     code=$?
1801     if [ "${code}" -ne 0 ]; then
1802         return "${code}"
1803     fi
1804 
1805     echo "${res}" | awk '
1806         BEGIN {
1807             d = strftime("%a %b %d")
1808             t = strftime("%H:%M:%S")
1809             printf "\x1b[7m%30s%s  %s%30s\x1b[0m\n\n", "", d, t, ""
1810         }
1811 
1812         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1813 
1814         $1 == "root" {
1815             gsub(/^/, "\x1b[38;2;52;101;164m")
1816             gsub(/ +/, "&\x1b[0m\x1b[38;2;52;101;164m")
1817             gsub(/$/, "\x1b[0m")
1818         }
1819 
1820         {
1821             gsub(/ \? /, "\x1b[38;2;135;135;175m&\x1b[0m")
1822             gsub(/0[:\.]00*/, "\x1b[38;2;135;135;175m&\x1b[0m")
1823             printf "%3d  %s\n", NR - 1, $0
1824         }
1825     ' | nn --gray | less -JMKiCRS
1826 }
1827 
1828 # Nice Size, using my scripts `nn` and `cext`
1829 ns() { wc -c "$@" | nn --gray | cext; }
1830 
1831 # Nice Transform Json, using my scripts `tj`, and `nj`
1832 ntj() { tj "$@" | nj; }
1833 
1834 # Nice TimeStamp
1835 nts() {
1836     ts '%Y-%m-%d %H:%M:%S' |
1837         sed -u 's-^-\x1b[48;2;218;218;218m\x1b[38;2;0;95;153m-; s- -\x1b[0m\t-2'
1838 }
1839 
1840 # emit nothing to output and/or discard everything from input
1841 null() {
1842     if [ $# -gt 0 ]; then
1843         "$@" > /dev/null
1844     else
1845         cat < /dev/null
1846     fi
1847 }
1848 
1849 # NULl-terminate LINES ends each stdin line with a null byte, instead of a
1850 # line-feed byte
1851 nullines() {
1852     awk -v ORS='\000' '
1853         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1854         { gsub(/\r$/, ""); print; fflush() }
1855     ' "$@"
1856 }
1857 
1858 # (Nice) What Are These (?) shows what the names given to it are/do, coloring
1859 # the syntax of shell functions
1860 nwat() {
1861     local a
1862     local gap=0
1863 
1864     if [ $# -eq 0 ]; then
1865         printf "\e[38;2;204;0;0mnwat: no names given\e[0m\n" > /dev/stderr
1866         return 1
1867     fi
1868 
1869     local cmd="bat"
1870     # debian linux uses a different name for the `bat` app
1871     if [ -e "/usr/bin/batcat" ]; then
1872         cmd="batcat"
1873     fi
1874 
1875     for a in "$@"; do
1876         [ "${gap}" -gt 0 ] && printf "\n"
1877         gap=1
1878         # printf "\e[7m%-80s\e[0m\n" "$a"
1879         printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
1880 
1881         # resolve 1 alias level
1882         if alias "$a" 2> /dev/null > /dev/null; then
1883             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
1884         fi
1885 
1886         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
1887             # resolved aliases with args/spaces in them would otherwise fail
1888             echo "$a"
1889         elif whence -f "$a" > /dev/null 2> /dev/null; then
1890             # zsh seems to show a shell function's code only via `whence -f`
1891             whence -f "$a"
1892         elif type "$a" > /dev/null 2> /dev/null; then
1893             # dash doesn't support `declare`, and `type` in bash emits
1894             # a redundant first output line, when it's a shell function
1895             type "$a" | awk '
1896                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
1897                 { print; fflush() }
1898                 END { if (NR < 2 && skipped) print skipped }
1899             ' | "$cmd" -l sh --style=plain --theme='Monokai Extended Light' \
1900                 --wrap=never --color=always |
1901                     sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g'
1902         else
1903             printf "\e[38;2;204;0;0m%s not found\e[0m\n" "$a"
1904         fi
1905     done | less -JMKiCRS
1906 }
1907 
1908 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1909 # alternating styles to make long numbers easier to read
1910 # nwc() { wc "$@" | nn --gray; }
1911 
1912 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1913 # alternating styles to make long numbers easier to read
1914 # nwc() { wc "$@" | nn --gray | awk '{ printf "%5d %s\n", NR, $0; fflush() }'; }
1915 
1916 # Nice Word-Count runs `wc` and colors results, using my scripts `nn` and
1917 # `cext`, alternating styles to make long numbers easier to read
1918 nwc() {
1919     wc "$@" | sort -rn | nn --gray | cext |
1920         awk '{ printf "%5d %s\n", NR - 1, $0; fflush() }'
1921 }
1922 
1923 # Nice Weather Forecast
1924 nwf() {
1925     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
1926     curl --show-error -s telnet://graph.no:79 |
1927     sed -E \
1928         -e 's/ *\r?$//' \
1929         -e '/^\[/d' \
1930         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
1931         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
1932         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
1933         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
1934         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
1935         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
1936         -e 's/\*/○/g' |
1937     awk 1 |
1938     less -JMKiCRS
1939 }
1940 
1941 # Nice Zoom Json, using my scripts `zj`, and `nj`
1942 nzj() { zj "$@" | nj; }
1943 
1944 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1945 # pawk() { awk -F='' -v RS='' "$@"; }
1946 
1947 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1948 pawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
1949 
1950 # Plain `fd`
1951 pfd() { fd --color=never "$@"; }
1952 
1953 # pick lines, using all the 1-based line-numbers given
1954 picklines() {
1955     awk '
1956         BEGIN { m = ARGC - 1; if (ARGC == 1) exit 0 }
1957         BEGIN { for (i = 1; i <= m; i++) { p[i] = ARGV[i]; delete ARGV[i] } }
1958         { l[++n] = $0 }
1959         END {
1960             for (i = 1; i <= m; i++) {
1961                 j = p[i]
1962                 if (j < 0) j += NR + 1
1963                 if (0 < j && j <= NR) print l[j]
1964             }
1965         }
1966     ' "$@"
1967 }
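# illustrative example for `picklines`: line-numbers can repeat, and negative
# ones count backward from the last line
#   seq 10 | picklines 3 3 -1    # emits "3", "3", then "10"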
1968 
1969 # Plain Interactive Grep
1970 pig() { ugrep --color=never -Q -E "$@"; }
1971 
1972 # make text plain, by ignoring ANSI terminal styling
1973 plain() {
1974     awk '
1975         {
1976             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
1977             gsub(/\x1b\][0-9]*;|\a|\x1b\\/, "") # OSC starts/terminators
1978             print; fflush()
1979         }
1980     ' "$@"
1981 }
1982 
1983 # end all lines with an ANSI-code to reset styles
1984 plainend() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1985 
1986 # end all lines with an ANSI-code to reset styles
1987 plainends() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1988 
1989 # play audio/video media
1990 # play() { mplayer -msglevel all=-1 "${@:--}"; }
1991 
1992 # play audio/video media
1993 play() { mpv "${@:--}"; }
1994 
1995 # Pick LINE, using the 1-based line-number given
1996 pline() {
1997     local line="$1"
1998     [ $# -gt 0 ] && shift
1999     awk -v n="${line}" '
2000         BEGIN { if (n < 1) exit 0 }
2001         NR == n { print; exit 0 }
2002     ' "$@"
2003 }
2004 
2005 # Paused MPV; especially useful when trying to view pictures via `mpv`
2006 pmpv() { mpv --pause "${@:--}"; }
2007 
2008 # Print Python result
2009 pp() { python -c "print($1)"; }
2010 
2011 # PRecede (input) ECHO, prepends a first line to stdin lines
2012 precho() { echo "$@" && cat /dev/stdin; }
2013 
2014 # PREcede (input) MEMO, prepends a first highlighted line to stdin lines
2015 prememo() {
2016     awk '
2017         BEGIN {
2018             if (ARGC > 1) printf "\x1b[7m"
2019             for (i = 1; i < ARGC; i++) {
2020                 if (i > 1) printf " "
2021                 printf "%s", ARGV[i]
2022                 delete ARGV[i]
2023             }
2024             if (ARGC > 1) printf "\x1b[0m\n"
2025             fflush()
2026         }
2027         { print; fflush() }
2028     ' "$@"
2029 }
2030 
2031 # start by joining all arguments given as a tab-separated-items line of output,
2032 # followed by all lines from stdin verbatim
2033 pretsv() {
2034     awk '
2035         BEGIN {
2036             for (i = 1; i < ARGC; i++) {
2037                 if (i > 1) printf "\t"
2038                 printf "%s", ARGV[i]
2039                 delete ARGV[i]
2040             }
2041             if (ARGC > 1) printf "\n"
2042             fflush()
2043         }
2044         { print; fflush() }
2045     ' "$@"
2046 }
2047 
2048 # Plain Recursive Interactive Grep
2049 prig() { ugrep --color=never -r -Q -E "$@"; }
2050 
2051 # show/list all current processes
2052 processes() {
2053     local res
2054     res="$(ps aux)"
2055     echo "${res}" | awk '!/ps aux$/' | sed -E \
2056         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' \
2057         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1'
2058 }
2059 
2060 # Play Youtube Audio
2061 pya() {
2062     local url
2063     # some youtube URIs end with extra playlist/tracker parameters
2064     url="$(echo "$1" | sed 's-&.*--')"
2065     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
2066 }
2067 
2068 # Quiet ignores stderr, without any ugly keyboard-dancing
2069 q() { "$@" 2> /dev/null; }
2070 
2071 # Quiet MPV
2072 qmpv() { mpv --quiet "${@:--}"; }
2073 
2074 # ignore stderr, without any ugly keyboard-dancing
2075 quiet() { "$@" 2> /dev/null; }
2076 
2077 # Reset the screen, which empties it and resets the current style
2078 r() { reset; }
2079 
2080 # keep only lines between the 2 line numbers given, inclusively
2081 rangelines() {
2082     { [ "$#" -eq 2 ] || [ "$#" -eq 3 ]; } && [ "${1}" -le "${2}" ] &&
2083         { tail -n +"${1:-1}" "${3:--}" | head -n "$(("${2}" - "${1}" + 1))"; }
2084 }
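# illustrative example for `rangelines`: both line-numbers are inclusive, and
# an optional 3rd argument names an input file, instead of using stdin
#   seq 20 | rangelines 5 8    # emits lines 5, 6, 7, and 8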
2085 
2086 # RANdom MANual page
2087 ranman() {
2088     find "/usr/share/man/man${1:-1}" -type f | shuf -n 1 | xargs basename |
2089         sed 's-\.gz$--' | xargs man
2090 }
2091 
2092 # Run AWK expression
2093 rawk() {
2094     local expr="${1:-0}"
2095     [ $# -gt 0 ] && shift
2096     awk "BEGIN { print ${expr}; exit }" "$@"
2097 }
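# illustrative examples for `rawk`: the expression runs in a BEGIN block, so
# no input is read
#   rawk '2 ^ 10'              # emits 1024
#   rawk 'substr("shell", 2)'  # emits "hell"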
2098 
2099 # play a ready-phone-line sound lasting the number of seconds given, or for 1
2100 # second by default; uses my script `waveout`
2101 ready() {
2102     local f='0.5 * sin(350*tau*t) + 0.5 * sin(450*tau*t)'
2103     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2104 }
2105 
2106 # reflow/trim lines of prose (text) to improve its legibility: it's especially
2107 # useful when the text is pasted from web-pages being viewed in reader mode
2108 reprose() {
2109     local w="${1:-80}"
2110     [ $# -gt 0 ] && shift
2111     awk '
2112         FNR == 1 && NR > 1 { print "" }
2113         { gsub(/\r$/, ""); print; fflush() }
2114     ' "$@" | fold -s -w "$w" | sed -u -E 's- *\r?$--'
2115 }
2116 
2117 # ignore ansi styles from stdin and restyle things using the style-name given;
2118 # uses my script `style`
2119 restyle() { style "$@"; }
2120 
2121 # change the tab-title on your terminal app
2122 retitle() { printf "\e]0;%s\a\n" "$*"; }
2123 
2124 # REVerse-order SIZE (byte-count)
2125 revsize() { wc -c "$@" | sort -rn; }
2126 
2127 # Run In Folder
2128 rif() {
2129     local code
2130     pushd "${1:-.}" > /dev/null || return
2131     [ $# -gt 0 ] && shift
2132     "$@"
2133     code=$?
2134     popd > /dev/null || return "${code}"
2135     return "${code}"
2136 }
2137 
2138 # play a ringtone-style sound lasting the number of seconds given, or for 1
2139 # second by default; uses my script `waveout`
2140 ringtone() {
2141     local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))'
2142     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2143 }
2144 
2145 # Read-Only Editor
2146 roe() { micro -readonly true "$@"; }
2147 
2148 # Read-Only Micro (text editor)
2149 rom() { micro -readonly true "$@"; }
2150 
2151 # run the command given, trying to turn its output into TSV (tab-separated
2152 # values); uses my script `dejson`
2153 rtab() { jc "$@" | dejson; }
2154 
2155 # Right TRIM ignores trailing spaces, as well as trailing carriage returns
2156 rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2157 
2158 # show a RULER-like width-measuring line
2159 # ruler() {
2160 #     local n="${1:-$(tput cols)}"
2161 #     [ "${n}" -gt 0 ] && printf "%${n}s\n" "" |
2162 #         sed -E 's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2163 # }
2164 
2165 # show a RULER-like width-measuring line
2166 ruler() {
2167     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \
2168         's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2169 }
2170 
2171 # run the command given, trying to turn its output into TSV (tab-separated
2172 # values); uses my script `dejson`
2173 runtab() { jc "$@" | dejson; }
2174 
2175 # run the command given, trying to turn its output into TSV (tab-separated
2176 # values); uses my script `dejson`
2177 runtsv() { jc "$@" | dejson; }
2178 
2179 # Reverse-order WC
2180 rwc() { wc "$@" | sort -rn; }
2181 
2182 # extended-mode Sed, enabling its full regex syntax
2183 # s() { sed -E -u "$@"; }
2184 
2185 # Substitute using `sed`, enabling its full regex syntax
2186 s() { sed -E -u "$(printf "s\xff$1\xff$2\xffg")"; }
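# illustrative example for `s`: using byte 0xff as the sed delimiter means
# slashes in patterns/replacements don't need escaping
#   echo /usr/local/bin | s '/usr/local' '/opt'    # emits /opt/bin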
2187 
2188 # Silent CURL spares you the progress bar, but still tells you about errors
2189 scurl() { curl --show-error -s "$@"; }
2190 
2191 # show a unique-looking SEParator line; useful to run between commands
2192 # which output walls of text
2193 sep() {
2194     [ "${1:-80}" -gt 0 ] &&
2195         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g'
2196 }
2197 
2198 # webSERVE files in a folder as localhost, using the port number given, or
2199 # port 8080 by default
2200 serve() {
2201     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
2202     python3 -m http.server "${1:-8080}" -d "${2:-.}"
2203 }
2204 
2205 # SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input
2206 setdiff() {
2207     # comm -23 <(sort "$1") <(sort "$2")
2208     # dash doesn't support the process-sub syntax
2209     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2210 }
2211 
2212 # SET INtersection, sorts its 2 inputs, then finds common lines
2213 setin() {
2214     # comm -12 <(sort "$1") <(sort "$2")
2215     # dash doesn't support the process-sub syntax
2216     (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2217 }
2218 
2219 # SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input
2220 setsub() {
2221     # comm -23 <(sort "$1") <(sort "$2")
2222     # dash doesn't support the process-sub syntax
2223     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2224 }
2225 
2226 # Show Files (and folders), coloring folders and links; uses my script `nn`
2227 sf() {
2228     ls -al --file-type --color=never --time-style iso "$@" | awk '
2229         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2230         {
2231             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2232             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2233             printf "%6d  %s\n", NR - 1, $0; fflush()
2234         }
2235     ' | nn --gray | less -JMKiCRS
2236 }
2237 
2238 # Show Files (and folders) Plus, by coloring folders, links, and extensions;
2239 # uses my scripts `nn` and `cext`
2240 sfp() {
2241     ls -al --file-type --color=never --time-style iso "$@" | awk '
2242         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2243         {
2244             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2245             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2246             printf "%6d  %s\n", NR - 1, $0; fflush()
2247         }
2248     ' | nn --gray | cext | less -JMKiCRS
2249 }
2250 
2251 # Show File Sizes, using my scripts `nn` and `cext`
2252 sfs() {
2253     # turn arg-list into single-item lines
2254     printf "%s\x00" "$@" |
2255     # calculate file-sizes, and reverse-sort results
2256     xargs -0 wc -c | sort -rn |
2257     # add/realign fields to improve legibility
2258     awk '
2259         # start output with a header-like line, and add a MiB field
2260         BEGIN { printf "%6s  %10s  %8s  name\n", "n", "bytes", "MiB"; fflush() }
2261         # make table breathe with empty lines, so tall outputs are readable
2262         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2263         # emit regular output lines
2264         {
2265             printf "%6d  %10d  %8.2f  ", NR - 1, $1, $1 / 1048576
2266             # first field is likely space-padded
2267             gsub(/^ */, "")
2268             # slice line after the first field, as filepaths can have spaces
2269             $0 = substr($0, length($1) + 1)
2270             # drop the single space separating the byte-count from the filepath
2271             gsub(/^ /, "")
2272             printf "%s\n", $0; fflush()
2273         }
2274     ' |
2275     # make zeros in the MiB field stand out with a special color
2276     awk '
2277         {
2278             gsub(/ 00*\.00* /, "\x1b[38;2;135;135;175m&\x1b[0m")
2279             print; fflush()
2280         }
2281     ' |
2282     # make numbers nice, alternating styles along 3-digit groups
2283     nn --gray |
2284     # color-code file extensions
2285     cext |
2286     # make result interactively browsable
2287     less -JMKiCRS
2288 }
2289 
2290 # SHell-run AWK output
2291 # shawk() { stdbuf -oL awk "$@" | sh; }
2292 
2293 # time/benchmark the tools whose command-lines come one-per-line from stdin,
2294 # appending to each one the same extra arguments given; uses `hyperfine`
2295 showdown() {
2296     awk '
2297         BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } }
2298         {
2299             printf "%s", $0
2300             for (i = 1; i < ARGC; i++) printf " %s", a[i]
2301             printf "\x00"; fflush()
2302         }
2303     ' "$@" | xargs -0 hyperfine --style full
2304 }
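# illustrative example for `showdown`, assuming `hyperfine` is installed; the
# file used here is just a stand-in
#   printf 'md5sum\nsha1sum\n' | showdown /etc/os-release
#   # benchmarks "md5sum /etc/os-release" against "sha1sum /etc/os-release"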
2305 
2306 # SHOW a command, then RUN it
2307 showrun() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2308 
2309 # SHell-QUOTE each line from the input(s): this is useful to make lines of
2310 # single-filepaths compatible with `xargs`, since standard shell settings
2311 # get in the way of filepaths with spaces and other special symbols in them
2312 shquote() {
2313     awk '
2314         {
2315             s = $0
2316             gsub(/\r$/, "", s)
2317             gsub(/\\/, "\\\\", s)
2318             gsub(/"/, "\\\"", s)
2319             gsub(/`/, "\\`", s)
2320             gsub(/\$/, "\\$", s)
2321             printf "\"%s\"\n", s; fflush()
2322         }
2323     ' "$@"
2324 }
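# illustrative example for `shquote`: quoted lines survive `xargs` unsplit,
# even when filepaths have spaces in them
#   printf 'my song.mp3\n' | shquote    # emits "my song.mp3", quotes included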
2325 
2326 # clean the screen, after running the command given
2327 sideshow() {
2328     local res
2329     tput smcup
2330     "$@"
2331     res=$?
2332     tput rmcup
2333     return "${res}"
2334 }
2335 
2336 # skip the first n lines, or the 1st line by default
2337 skip() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2338 
2339 # skip the first n bytes
2340 skipbytes() { tail -c +$(("$1" + 1)) "${2:--}"; }
2341 
2342 # skip the last n lines, or the last line by default
2343 skiplast() { head -n -"${1:-1}" "${2:--}"; }
2344 
2345 # skip the last n bytes
2346 skiplastbytes() { head -c -"$1" "${2:--}"; }
2347 
2348 # skip the last n lines, or the last line by default
2349 skiplastlines() { head -n -"${1:-1}" "${2:--}"; }
2350 
2351 # skip the first n lines, or the 1st line by default
2352 skiplines() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2353 
2354 # SLOW/delay lines from the standard-input, waiting the number of seconds
2355 # given for each line, or waiting 1 second by default
2356 slow() {
2357     local seconds="${1:-1}"
2358     (
2359         IFS= # an empty IFS keeps each line intact, spaces included
2360         while read -r line; do
2361             sleep "${seconds}"
2362             printf "%s\n" "${line}"
2363         done
2364     )
2365 }
2366 
2367 # Show Latest Podcasts, using my scripts `podfeed` and `si`
2368 slp() {
2369     local title
2370     title="Latest Podcast Episodes as of $(date +'%F %T')"
2371     podfeed -title "${title}" "$@" | si
2372 }
2373 
2374 # recursively find all files with fewer bytes than the number given
2375 smallfiles() {
2376     local n
2377     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
2378     [ $# -gt 0 ] && shift
2379 
2380     local arg
2381     for arg in "${@:-.}"; do
2382         if [ ! -d "${arg}" ]; then
2383             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2384             return 1
2385         fi
2386         stdbuf -oL find "${arg}" -type f -size -"$n"c
2387     done
2388 }
2389 
2390 # emit the first line as is, sorting all lines after that, using the
2391 # `sort` command, passing all/any arguments/options to it
2392 sortrest() {
2393     awk -v sort="sort $*" '
2394         { gsub(/\r$/, "") }
2395         NR == 1 { print; fflush() }
2396         NR > 1 { print | sort }
2397     '
2398 }
2399 
2400 # SORt Tab-Separated Values: emit the first line as is, sorting all lines after
2401 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2402 # all/any arguments/options to it
2403 sortsv() {
2404     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2405         { gsub(/\r$/, "") }
2406         NR == 1 { print; fflush() }
2407         NR > 1 { print | sort }
2408     '
2409 }
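# illustrative example for `sortsv`: the header line stays on top, while the
# rest is sorted by the options given, here reverse-numerically on column 2
#   printf 'name\tsize\nb\t2\na\t10\n' | sortsv -rnk2
#   # emits the header, then "a<TAB>10", then "b<TAB>2"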
2410 
2411 # emit a line with the number of spaces given in it
2412 spaces() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" ""; }
2413 
2414 # ignore leading spaces, trailing spaces, even runs of multiple spaces
2415 # in the middle of lines, as well as trailing carriage returns
2416 squeeze() {
2417     awk '
2418         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2419         {
2420             gsub(/^ +| *\r?$/, "")
2421             gsub(/ *\t */, "\t")
2422             gsub(/  +/, " ")
2423             print; fflush()
2424         }
2425     ' "$@"
2426 }
2427 
2428 # SQUeeze and stOMP, by ignoring leading spaces, trailing spaces, even runs
2429 # of multiple spaces in the middle of lines, as well as trailing carriage
2430 # returns, while also turning runs of empty lines into single empty lines,
2431 # and ignoring leading/trailing empty lines, effectively also `squeezing`
2432 # lines vertically
2433 squomp() {
2434     awk '
2435         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2436         /^\r?$/ { empty = 1; next }
2437         empty { if (n > 0) print ""; empty = 0 }
2438         {
2439             gsub(/^ +| *\r?$/, "")
2440             gsub(/ *\t */, "\t")
2441             gsub(/  +/, " ")
2442             print; fflush()
2443             n++
2444         }
2445     ' "$@"
2446 }
2447 
2448 # Show a command, then Run it
2449 sr() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2450 
2451 # turn runs of empty lines into single empty lines, effectively squeezing
2452 # paragraphs vertically, so to speak; runs of empty lines both at the start
2453 # and at the end are ignored
2454 stomp() {
2455     awk '
2456         /^\r?$/ { empty = 1; next }
2457         empty { if (n > 0) print ""; empty = 0 }
2458         { print; fflush(); n++ }
2459     ' "$@"
2460 }
2461 
2462 # STRike-thru (lines) with AWK
2463 strawk() {
2464     local cond="${1:-1}"
2465     [ $# -gt 0 ] && shift
2466     awk '
2467         { low = lower = tolower($0) }
2468         '"${cond}"' {
2469             gsub(/\x1b\[0m/, "\x1b[0m\x1b[9m")
2470             printf "\x1b[9m%s\x1b[0m\n", $0; fflush()
2471             next
2472         }
2473         { print; fflush() }
2474     ' "$@"
2475 }
2476 
2477 # Sort Tab-Separated Values: emit the first line as is, sorting all lines after
2478 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2479 # all/any arguments/options to it
2480 stsv() {
2481     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2482         { gsub(/\r$/, "") }
2483         NR == 1 { print; fflush() }
2484         NR > 1 { print | sort }
2485     '
2486 }
2487 
2488 # use the result of the `awk` function `substr` for each line
2489 substr() {
2490     local start="${1:-1}"
2491     local length="${2:-80}"
2492     [ $# -gt 0 ] && shift
2493     [ $# -gt 0 ] && shift
2494     awk -v start="${start}" -v len="${length}" \
2495         '{ printf "%s\n", substr($0, start, len); fflush() }' "$@"
2496 }
2497 
2498 # turn SUDo privileges OFF right away: arguments also cause `sudo` to run with
2499 # what's given, before relinquishing existing privileges
2500 # sudoff() {
2501 #     local code=0
2502 #     if [ $# -gt 0 ]; then
2503 #         sudo "$@"
2504 #         code=$?
2505 #     fi
2506 #     sudo -k
2507 #     return "${code}"
2508 # }
2509 
2510 # append a final Tab-Separated-Values line with the sums of all columns from
2511 # the input table(s) given; items from first lines aren't counted/added
2512 sumtsv() {
2513     awk -F "\t" '
2514         {
2515             print; fflush()
2516             if (width < NF) width = NF
2517         }
2518 
2519         FNR > 1 { for (i = 1; i <= NF; i++) sums[i] += $i + 0 }
2520 
2521         END {
2522             for (i = 1; i <= width; i++) {
2523                 if (i > 1) printf "\t"
2524                 printf "%s", sums[i] ""
2525             }
2526             if (width > 0) printf "\n"
2527         }
2528     ' "$@"
2529 }
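# illustrative example for `sumtsv`: the input passes through unchanged, then
# a final line with per-column sums is appended; header items aren't added up
#   printf 'a\tb\n1\t2\n3\t4\n' | sumtsv    # its last line is "4<TAB>6"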
2530 
2531 # show a random command defined in `clam`, using `wat` from `clam` itself
2532 # surprise() {
2533 #     local p="$(which clam)"
2534 #     wat "$(grep -E '^[a-z]+\(' "$p" | shuf -n 1 | sed -E 's-\(.*--')"
2535 # }
2536 
2537 # Time the command given
2538 t() { /usr/bin/time "$@"; }
2539 
2540 # show a reverse-sorted tally of all lines read, where ties are sorted
2541 # alphabetically
2542 tally() {
2543     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
2544         # reassure users by instantly showing the header
2545         BEGIN { print "value\ttally"; fflush() }
2546         { gsub(/\r$/, ""); t[$0]++ }
2547         END { for (k in t) { printf("%s\t%d\n", k, t[k]) | sort } }
2548     ' "$@"
2549 }
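# illustrative example for `tally`: most-repeated values come first, and ties
# are listed alphabetically
#   printf 'red\nblue\nred\n' | tally
#   # emits "value<TAB>tally", then "red<TAB>2", then "blue<TAB>1"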
2550 
2551 # Tab AWK: TSV-specific I/O settings for `awk`
2552 # tawk() { awk -F "\t" -v OFS="\t" "$@"; }
2553 
2554 # Tab AWK: TSV-specific I/O settings for `awk`
2555 tawk() { stdbuf -oL awk -F "\t" -v OFS="\t" "$@"; }
2556 
2557 # quick alias for my script `tbp`
2558 tb() { tbp "$@"; }
2559 
2560 # Titled conCATenate Lines highlights each filename, before emitting its
2561 # lines
2562 tcatl() {
2563     awk '
2564         FNR == 1 { printf "\x1b[7m%s\x1b[0m\n", FILENAME; fflush() }
2565         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2566         { gsub(/\r$/, ""); print; fflush() }
2567     ' "$@"
2568 }
2569 
2570 # Title ECHO changes the tab-title on your terminal app
2571 techo() { printf "\e]0;%s\a\n" "$*"; }
2572 
2573 # simulate the cadence of old-fashioned teletype machines, by slowing down
2574 # the output of ASCII/UTF-8 symbols from the standard-input
2575 # teletype() {
2576 #     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | (
2577 #         IFS="$(printf "\n")"
2578 #         while read -r line; do
2579 #             echo "${line}" | sed -E 's-(.)-\1\n-g' |
2580 #                 while read -r item; do
2581 #                     sleep 0.015
2582 #                     printf "%s" "${item}"
2583 #                 done
2584 #             sleep 0.75
2585 #             printf "\n"
2586 #         done
2587 #     )
2588 # }
2589 
2590 # simulate the cadence of old-fashioned teletype machines, by slowing down
2591 # the output of ASCII/UTF-8 symbols from the standard-input
2592 teletype() {
2593     awk '
2594         {
2595             gsub(/\r$/, "")
2596 
2597             n = length($0)
2598             for (i = 1; i <= n; i++) {
2599                 if (code = system("sleep 0.015")) exit code
2600                 printf "%s", substr($0, i, 1); fflush()
2601             }
2602             if (code = system("sleep 0.75")) exit code
2603             printf "\n"; fflush()
2604         }
2605     ' "$@"
2606 }
2607 
2608 # run `top` without showing any of its output after quitting it
2609 tip() { tput smcup; top "$@"; tput rmcup; }
2610 
2611 # change the tab-title on your terminal app
2612 title() { printf "\e]0;%s\a\n" "$*"; }
2613 
2614 # quick alias for my script `tjp`
2615 tj() { tjp "$@"; }
2616 
2617 # quick alias for my script `tlp`
2618 tl() { tlp "$@"; }
2619 
2620 # show the current date in a specific format
2621 today() { date +'%Y-%m-%d %a %b %d'; }
2622 
2623 # get the first n lines, or 1 by default
2624 toline() { head -n "${1:-1}" "${2:--}"; }
2625 
2626 # lowercase all ASCII symbols
2627 tolower() { awk '{ print tolower($0); fflush() }' "$@"; }
2628 
2629 # play a tone/sine-wave sound lasting the number of seconds given, or for 1
2630 # second by default: after the optional duration, the next optional arguments
2631 # are the volume and the tone-frequency; uses my script `waveout`
2632 tone() {
2633     waveout "${1:-1}" "${2:-1} * sin(${3:-440} * 2 * pi * t)" |
2634         mpv --really-quiet -
2635 }
2636 
2637 # get the processes currently using the most cpu
2638 topcpu() {
2639     local n="${1:-10}"
2640     [ "$n" -gt 0 ] && ps aux | awk '
2641         NR == 1 { print; fflush() }
2642         NR > 1 { print | "sort -rnk3" }
2643     ' | head -n "$(("$n" + 1))"
2644 }
2645 
2646 # show all files directly in the folder given, without looking any deeper
2647 topfiles() {
2648     local arg
2649     for arg in "${@:-.}"; do
2650         if [ ! -d "${arg}" ]; then
2651             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2652             return 1
2653         fi
2654         stdbuf -oL find "${arg}" -maxdepth 1 -type f
2655     done
2656 }
2657 
2658 # show all folders directly in the folder given, without looking any deeper
2659 topfolders() {
2660     local arg
2661     for arg in "${@:-.}"; do
2662         if [ ! -d "${arg}" ]; then
2663             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2664             return 1
2665         fi
2666         stdbuf -oL find "${arg}" -maxdepth 1 -type d |
2667             awk '!/^\.$/ { print; fflush() }'
2668     done
2669 }
2670 
2671 # get the processes currently using the most memory
2672 topmemory() {
2673     local n="${1:-10}"
2674     [ "$n" -gt 0 ] && ps aux | awk '
2675         NR == 1 { print; fflush() }
2676         NR > 1 { print | "sort -rnk6" }
2677     ' | head -n "$(("$n" + 1))"
2678 }
2679 
2680 # transpose (switch) rows and columns from tables
2681 transpose() {
2682     awk '
2683         { gsub(/\r$/, "") }
2684 
2685         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
2686 
2687         {
2688             for (i = 1; i <= NF; i++) lines[i][NR] = $i
2689             if (maxitems < NF) maxitems = NF
2690         }
2691 
2692         END {
2693             for (j = 1; j <= maxitems; j++) {
2694                 for (i = 1; i <= NR; i++) {
2695                     if (i > 1) printf "\t"
2696                     printf "%s", lines[j][i]
2697                 }
2698                 printf "\n"
2699             }
2700         }
2701     ' "$@"
2702 }
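# illustrative example for `transpose`; note the nested arrays used above seem
# to need gawk, rather than mawk or busybox awk
#   printf '1\t2\t3\n4\t5\t6\n' | transpose
#   # emits "1<TAB>4", "2<TAB>5", and "3<TAB>6"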
2703 
2704 # ignore leading/trailing spaces, as well as trailing carriage returns
2705 trim() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2706 
2707 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2708 # decimal dots themselves, when decimals in a number are all zeros; works
2709 # on gawk and busybox awk, but not on mawk, as the latter lacks `gensub`
2710 # trimdecs() {
2711 #     awk '
2712 #         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2713 #         {
2714 #             gsub(/\r$/, "")
2715 #             $0 = gensub(/([0-9]+)\.0+/, "\\1", "g")
2716 #             $0 = gensub(/([0-9]+\.[0-9]*[1-9]+)0+/, "\\1", "g")
2717 #             print; fflush()
2718 #         }
2719 #     ' "$@"
2720 # }
2721 
2722 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2723 # decimal dots themselves, when decimals in a number are all zeros
2724 trimdecs() {
2725     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" |
2726         sed -u -E 's-([0-9]+)\.0+-\1-g; s-([0-9]+\.[0-9]*[1-9]+)0+-\1-g'
2727 }
2728 
2729 # ignore trailing spaces, as well as trailing carriage returns
2730 trimend() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2731 
2732 # ignore trailing spaces, as well as trailing carriage returns
2733 trimends() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2734 
2735 # ignore leading/trailing spaces, as well as trailing carriage returns
2736 trimlines() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2737 
2738 # ignore leading/trailing spaces, as well as trailing carriage returns
2739 trimsides() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2740 
2741 # ignore trailing spaces, as well as trailing carriage returns
2742 trimtrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2743 
2744 # ignore trailing spaces, as well as trailing carriage returns
2745 trimtrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2746 
2747 # try running a command, emitting an explicit message to standard-error
2748 # if the command given fails
2749 try() {
2750     "$@" || {
2751         printf "\n\e[31m%s \e[41m\e[97m failed \e[0m\n" "$*" >&2
2752         return 255
2753     }
2754 }
2755 
2756 # Transform Strings with Python; uses my script `tbp`
2757 tsp() { tbp -s "$@"; }
2758 
2759 # run the command given, trying to turn its output into TSV (tab-separated
2760 # values); uses my script `dejson`
2761 tsvrun() { jc "$@" | dejson; }
2762 
2763 # Underline (lines) with AWK
2764 uawk() {
2765     local cond="${1:-1}"
2766     [ $# -gt 0 ] && shift
2767     awk '
2768         { low = lower = tolower($0) }
2769         '"${cond}"' {
2770             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2771             printf "\x1b[4m%s\x1b[0m\n", $0; fflush()
2772             next
2773         }
2774         { print; fflush() }
2775     ' "$@"
2776 }
2777 
2778 # Underline Every few lines: make groups of 5 lines (by default) stand out by
2779 # underlining the last line of each
2780 ue() {
2781     local n="${1:-5}"
2782     [ $# -gt 0 ] && shift
2783     awk -v n="$n" '
2784         BEGIN { if (n == 0) n = -1 }
2785         NR % n == 0 && NR != 1 {
2786             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2787             printf("\x1b[4m%s\x1b[0m\n", $0); fflush()
2788             next
2789         }
2790         { print; fflush() }
2791     ' "$@"
2792 }
2793 
2794 # deduplicate lines, keeping them in their original order
2795 unique() { awk '!c[$0]++ { print; fflush() }' "$@"; }
2796 
2797 # concatenate all named input sources unix-style: all trailing CRLFs become
2798 # single LFs, and each non-empty input always ends in a LF, so lines from
2799 # different sources are never accidentally joined; also, leading UTF-8 BOMs
2800 # on the first line of each input are ignored, as those are useless at best
2801 unixify() {
2802     awk '
2803         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2804         { gsub(/\r$/, ""); print; fflush() }
2805     ' "$@"
2806 }
2807 
2808 # go UP n folders, or go up 1 folder by default
2809 up() {
2810     if [ "${1:-1}" -le 0 ]; then
2811         cd .
2812         return $?
2813     fi
2814 
2815     cd "$(printf "%${1:-1}s" "" | sed 's- -../-g')" || return $?
2816 }
2817 
2818 # convert United States Dollars into CAnadian Dollars, using the latest
2819 # official exchange rates from the bank of canada; during weekends, the
2820 # latest rate may be from a few days ago; the default amount of usd to
2821 # convert is 1, when not given
2822 usd2cad() {
2823     local site='https://www.bankofcanada.ca/valet/observations/group'
2824     local csv_rates="${site}/FX_RATES_DAILY/csv"
2825     local url
2826     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
2827     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
2828         /USD/ { for (i = 1; i <= NF; i++) if($i ~ /USD/) j = i }
2829         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
2830 }
2831 
2832 # View with `less`
2833 v() { less -JMKiCRS "$@"; }
2834 
2835 # run a command, showing its success/failure right after
2836 verdict() {
2837     local code
2838     "$@"
2839     code=$?
2840 
2841     if [ "${code}" -eq 0 ]; then
2842         printf "\n\e[38;2;0;135;95m%s \e[48;2;0;135;95m\e[38;2;255;255;255m succeeded \e[0m\n" "$*" >&2
2843     else
2844         printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" >&2
2845     fi
2846     return "${code}"
2847 }
2848 
2849 # run `cppcheck` with even stricter options
2850 vetc() { cppcheck --enable=portability --enable=style "$@"; }
2851 
2852 # run `cppcheck` with even stricter options
2853 vetcpp() { cppcheck --enable=portability --enable=style "$@"; }
2854 
2855 # check shell scripts for common gotchas, avoiding complaints about using
2856 # the `local` keyword, which is widely supported in practice
2857 vetshell() { shellcheck -e 3043 "$@"; }
2858 
2859 # View with Header runs `less` without line numbers, with ANSI styles, no
2860 # line-wraps, and using the first n lines as a sticky-header (1 by default),
2861 # so they always show on top
2862 vh() {
2863     local n="${1:-1}"
2864     [ $# -gt 0 ] && shift
2865     less --header="$n" -JMKiCRS "$@"
2866 }
2867 
2868 # VIEW the result of showing a command, then RUNning it, using `less`
2869 viewrun() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2870 
2871 # View Nice Columns; uses my scripts `realign` and `nn`
2872 vnc() { realign "$@" | nn --gray | less -JMKiCRS; }
2873 
2874 # View Nice Hexadecimals; uses my script `nh`
2875 vnh() { nh "$@" | less -JMKiCRS; }
2876 
2877 # View Nice Json / Very Nice Json; uses my scripts `nj` and `nn`
2878 vnj() { nj "$@" | less -JMKiCRS; }
2879 
2880 # View Very Nice Json with Nice Numbers; uses my scripts `nj` and `nn`
2881 vnjnn() { nj "$@" | nn --gray | less -JMKiCRS; }
2882 
2883 # View Nice Numbers; uses my script `nn`
2884 vnn() { nn "${@:---gray}" | less -JMKiCRS; }
2885 
2886 # View Nice Table / Very Nice Table; uses my scripts `nt` and `nn`
2887 vnt() {
2888     awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" |
2889         nt | nn --gray |
2890         awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' |
2891         less -JMKiCRS #--header=1
2892 }
2893 
2894 # View-Run using `less`: show a command, then run it
2895 # vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less --header=1 -JMKiCRS; }
2896 
2897 # View-Run using `less`: show a command, then run it
2898 vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2899 
2900 # View Text with `less`
2901 # vt() { less -JMKiCRS "$@"; }
2902 
2903 # View Text with the `micro` text-editor in read-only mode
2904 vt() { micro -readonly true "$@"; }
2905 
2906 # What are these (?); uses my command `nwat`
2907 # w() { nwat "$@"; }
2908 
2909 # What Are These (?) shows what the names given to it are/do
2910 wat() {
2911     local a
2912     local gap=0
2913 
2914     if [ $# -eq 0 ]; then
2915         printf "\e[31mwat: no names given\e[0m\n" > /dev/stderr
2916         return 1
2917     fi
2918 
2919     for a in "$@"; do
2920         [ "${gap}" -gt 0 ] && printf "\n"
2921         gap=1
2922         # printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
2923         printf "\e[7m%-80s\e[0m\n" "$a"
2924 
2925         # resolve 1 alias level
2926         if alias "$a" 2> /dev/null > /dev/null; then
2927             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
2928         fi
2929 
2930         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
2931             # resolved aliases with args/spaces in them would otherwise fail
2932             echo "$a"
2933         elif whence -f "$a" > /dev/null 2> /dev/null; then
2934             # zsh seems to show a shell function's code only via `whence -f`
2935             whence -f "$a"
2936         elif type "$a" > /dev/null 2> /dev/null; then
2937             # dash doesn't support `declare`, and `type` in bash emits
2938             # a redundant first output line, when it's a shell function
2939             type "$a" | awk '
2940                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
2941                 { print; fflush() }
2942                 END { if (NR < 2 && skipped) print skipped }
2943             '
2944         else
2945             printf "\e[31m%s not found\e[0m\n" "$a"
2946         fi
2947     done | less -JMKiCRS
2948 }
2949 
2950 # Word-Count TSV, runs the `wc` app using all stats, emitting tab-separated
2951 # lines instead
2952 wctsv() {
2953     printf "file\tbytes\tlines\tcharacters\twords\tlongest\n"
2954     stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \
2955         's-^ *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^\r]*)$-\6\t\4\t\1\t\3\t\2\t\5-' |
2956         awk '
2957             NR > 1 { print prev; fflush() }
2958             { prev = $0 }
2959             END { if (NR == 1 || !/^total\t/) print }
2960         '
2961 }
2962 
2963 # get weather forecasts, almost filling the terminal's current width
2964 # weather() {
2965 #     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
2966 #     curl --show-error -s telnet://graph.no:79 |
2967 #     sed -E \
2968 #         -e 's/ *\r?$//' \
2969 #         -e '/^\[/d' \
2970 #         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
2971 #         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
2972 #         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
2973 #         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
2974 #         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
2975 #         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
2976 #         -e 's/\*/○/g' |
2977 #     awk 1 |
2978 #     less -JMKiCRS
2979 # }
2980 
2981 # get weather forecasts; uses my script `nwf`
2982 weather() { nwf "$@"; }
2983 
2984 # Weather Forecast
2985 wf() {
2986     printf "%s\r\n\r\n" "$*" | curl --show-error -s telnet://graph.no:79 |
2987         awk '{ print; fflush() }' | less -JMKiCRS
2988 }
2989 
2990 # recursively find all files with trailing spaces/CRs
2991 wheretrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2992 
2993 # recursively find all files with trailing spaces/CRs
2994 whichtrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2995 
2996 # turn all detected /mnt/-prefixed full unix-style paths, the way WSL mounts
2997 # windows drives, into WINdows-style PATHS
2998 winpaths() {
2999     awk '{ print; fflush() }' "$@" |
3000         sed -u -E 's-(/mnt/([A-Za-z])(/))-\u\2:/-g'
3001 }
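# illustrative example for `winpaths`:
#   echo /mnt/c/Users/me/notes.txt | winpaths    # emits C:/Users/me/notes.txt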
3002 
3003 # run `xargs`, using whole lines as extra arguments
3004 # x() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
3005 
3006 # run `xargs`, using whole lines as extra arguments
3007 # x() {
3008 #     awk -v ORS='\000' '
3009 #         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
3010 #         { gsub(/\r$/, ""); print; fflush() }
3011 #     ' | xargs -0 "$@"
3012 # }
3013 
3014 # run `xargs`, using zero/null bytes as the extra-arguments terminator
3015 x0() { xargs -0 "$@"; }
3016 
3017 # run `xargs`, using whole lines as extra arguments
3018 # xl() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
3019 
3020 # run `xargs`, using whole lines as extra arguments
3021 xl() {
3022     awk -v ORS='\000' '
3023         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
3024         { gsub(/\r$/, ""); print; fflush() }
3025     ' | xargs -0 "$@"
3026 }
3027 
3028 # Youtube Audio Player
3029 yap() {
3030     local url
3031     # some youtube URIs end with extra playlist/tracker parameters
3032     url="$(echo "$1" | sed 's-&.*--')"
3033     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
3034 }
3035 
3036 # show a calendar for the current YEAR, or for the year given
3037 year() {
3038     {
3039         # show the current date/time center-aligned
3040         printf "%20s\e[38;2;78;154;6m%s\e[0m  \e[38;2;52;101;164m%s\e[0m\n\n" \
3041             "" "$(date +'%a %b %d %Y')" "$(date +%T)"
3042         # debian linux has a different `cal` app which highlights the day
3043         if [ -e "/usr/bin/ncal" ]; then
3044             # fix debian/ncal's weird way to highlight the current day
3045             ncal -C -y "$@" | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
3046         else
3047             cal -y "$@"
3048         fi
3049     } | less -JMKiCRS
3050 }
3051 
3052 # show the current date in the YYYY-MM-DD format
3053 ymd() { date +'%Y-%m-%d'; }
3054 
3055 # YouTube Url
3056 ytu() {
3057     local url
3058     # some youtube URIs end with extra playlist/tracker parameters
3059     url="$(echo "$1" | sed 's-&.*--')"
3060     [ $# -gt 0 ] && shift
3061     yt-dlp "$@" --get-url "${url}"
3062 }
3063 
3064 # . <(
3065 #     find "$(dirname $(which clam))" -type f -print0 |
3066 #         xargs -0 -n 1 basename |
3067 #         awk '{ print "unset " $0; print "unalias " $0 }'
3068 # ) 2> /dev/null