File: clam.sh
   1 #!/bin/sh
   2 
   3 # The MIT License (MIT)
   4 #
   5 # Copyright © 2020-2025 pacman64
   6 #
   7 # Permission is hereby granted, free of charge, to any person obtaining a copy
   8 # of this software and associated documentation files (the “Software”), to deal
   9 # in the Software without restriction, including without limitation the rights
  10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11 # copies of the Software, and to permit persons to whom the Software is
  12 # furnished to do so, subject to the following conditions:
  13 #
  14 # The above copyright notice and this permission notice shall be included in
  15 # all copies or substantial portions of the Software.
  16 #
  17 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23 # SOFTWARE.
  24 
  25 
  26 # clam
  27 #
  28 # Command-Line Augmentation Module (clam): get the best out of your shell
  29 #
  30 #
  31 # This is a collection of arguably useful shell functions and shortcuts:
  32 # some of these extra commands can be real time/effort savers, ideally
  33 # letting you concentrate on getting things done.
  34 #
  35 # Some of these commands depend on my other scripts from the `pac-tools`,
  36 # while others rely on widely-preinstalled command-line apps, or on ones
  37 # available from most of the major command-line `package` managers.
  38 #
  39 # Among these commands, you'll notice a preference for lines whose items
  40 # are tab-separated instead of space-separated, and unix-style lines, which
  41 # always end with a line-feed, instead of a CRLF byte-pair. This convention
  42 # makes plain-text data-streams less ambiguous and generally easier to work
  43 # with, especially when passing them along pipes.
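#
# As a quick illustration, with tabs shown as a visible `\t`, a line like
#     Alpha Centauri\t4.37\tlight-years
# splits unambiguously into 3 items, even though its first item has a space.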
  44 #
  45 # To use this script, you're supposed to `source` it, so its definitions
  46 # stay for your whole shell session: for that, you can run `source clam` or
  47 # `. clam` (no quotes either way), either directly or at shell startup.
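#
# For example, to have these definitions at every shell startup, your shell's
# rc-file could source this script with a line like one of the ones below,
# where the exact path is just a guess, depending on where you keep it:
#
#     . clam
#     . "$HOME/pac-tools/clam"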
  48 #
  49 # This script is compatible with `bash`, `zsh`, and even `dash`, which is
  50 # debian linux's default non-interactive shell. Some of its commands even
  51 # seem to work on busybox's shell.
  52 
  53 
  54 case "$1" in
  55     -h|--h|-help|--help)
  56         # show help message, using the info-comment from this very script
  57         awk '
  58             /^case / { exit }
  59             /^# +clam$/, /^$/ { gsub(/^# ?/, ""); print }
  60         ' "$0"
  61         exit 0
  62     ;;
  63 esac
  64 
  65 
  66 # dash doesn't support regex-matching syntax, forcing the use of case statements
  67 case "$0" in
  68     -bash|-dash|-sh|bash|dash|sh)
  69         # script is being sourced with bash or dash, which is good
  70         :
  71     ;;
  72     *)
  73         case "$ZSH_EVAL_CONTEXT" in
  74             *:file)
  75                 # script is being sourced with zsh, which is good
  76                 :
  77             ;;
  78             *)
  79                 # script is being run normally, which is a waste of time
  80 printf "\e[48;2;255;255;135m\e[30mDon't run this script, source it instead: to do that,\e[0m\n"
  81 printf "\e[48;2;255;255;135m\e[30mrun 'source clam' or '. clam' (no quotes either way).\e[0m\n"
  82                 # failing during shell-startup may deny shell access, so exit
  83                 # with a 0 error-code to declare success
  84                 exit 0
  85             ;;
  86         esac
  87     ;;
  88 esac
  89 
  90 
  91 # n-column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
  92 alias 1='bsbs 1'
  93 alias 2='bsbs 2'
  94 alias 3='bsbs 3'
  95 alias 4='bsbs 4'
  96 alias 5='bsbs 5'
  97 alias 6='bsbs 6'
  98 alias 7='bsbs 7'
  99 alias 8='bsbs 8'
 100 alias 9='bsbs 9'
 101 alias 0='bsbs 10'
 102 
 103 # alias a=avoid
 104 # alias c=cat
 105 # alias e=echo
 106 # alias f=fetch
 107 # alias g=get
 108 # alias h=naman
 109 # alias m=match
 110 # alias p=plain
 111 # alias q=quiet
 112 # alias r=reset
 113 # alias t=time
 114 # alias y=year
 115 
 116 # find name from the local `apt` database of installable packages
 117 # aptfind() {
 118 #     # despite warnings, the `apt search` command has been around for years
 119 #     # apt search "$1" 2>/dev/null | rg -A 1 "^$1" | sed -u 's/^--$//'
 120 #     apt search "$1" 2>/dev/null | rg -A 1 "^[a-z0-9-]*$1" |
 121 #         sed -u 's/^--$//' | less -JMKiCRS
 122 # }
 123 
 124 # emit each argument given as its own line of output
 125 args() { awk 'BEGIN { for (i = 1; i < ARGC; i++) print ARGV[i]; exit }' "$@"; }
 126 
 127 # turn UTF-8 into visible pseudo-ASCII, where variants of latin letters become
 128 # their basic ASCII counterparts, and where non-ASCII symbols become question
 129 # marks, one question mark for each code-point byte
 130 asciify() { iconv -f utf-8 -t ascii//translit "$@"; }
 131 
 132 # avoid/ignore lines which match any of the regexes given
 133 avoid() {
 134     awk '
 135         BEGIN {
 136             for (i = 1; i < ARGC; i++) {
 137                 e[i] = ARGV[i]
 138                 delete ARGV[i]
 139             }
 140         }
 141 
 142         {
 143             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
 144             print; fflush()
 145             got++
 146         }
 147 
 148         END { exit(got == 0) }
 149     ' "${@:-^\r?$}"
 150 }
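
# a quick usage sketch for `avoid`, with made-up example regexes: of the 3
# lines below, `beta` matches `^b` and `gamma` matches `mm`, so only the
# `alpha` line makes it through
#     printf 'alpha\nbeta\ngamma\n' | avoid '^b' 'mm'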
 151 
 152 # AWK Begin
 153 # awkb() { awk "BEGIN { $1; exit }"; }
 154 
 155 # AWK Begin
 156 awkb() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 157 
 158 # emit a line with a repeating ball-like symbol in it
 159 balls() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -●-g'; }
 160 
 161 # show an ansi-styled BANNER-like line
 162 # banner() { printf "\e[7m%s\e[0m\n" "$*"; }
 163 
 164 # show an ansi-styled BANNER-like line
 165 banner() { printf "\e[7m%-$(tput cols)s\e[0m\n" "$*"; }
 166 
 167 # emit a colored bar which can help visually separate different outputs
 168 bar() {
 169     [ "${1:-80}" -gt 0 ] &&
 170         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" ""
 171 }
 172 
 173 # process Blocks/paragraphs of non-empty lines with AWK
 174 # bawk() { awk -F='' -v RS='' "$@"; }
 175 
 176 # process Blocks/paragraphs of non-empty lines with AWK
 177 bawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 178 
 179 # play a repeating and annoying high-pitched beep sound a few times a second,
 180 # lasting the number of seconds given, or for 1 second by default; uses my
 181 # script `waveout`
 182 beeps() {
 183     local f='sin(2_000 * tau * t) * (t % 0.5 < 0.0625)'
 184     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 185 }
 186 
 187 # start by joining all arguments given as a tab-separated-items line of output,
 188 # followed by all lines from stdin verbatim
 189 begintsv() {
 190     awk '
 191         BEGIN {
 192             for (i = 1; i < ARGC; i++) {
 193                 if (i > 1) printf "\t"
 194                 printf "%s", ARGV[i]
 195                 delete ARGV[i]
 196             }
 197             if (ARGC > 1) printf "\n"
 198             fflush()
 199         }
 200         { print; fflush() }
 201     ' "$@"
 202 }
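
# a quick usage sketch for `begintsv`: the pipe below emits a `value` header
# line, followed by the lines 1, 2, and 3 coming from its standard input
#     seq 3 | begintsv value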
 203 
 204 # play a repeating synthetic-bell-like sound lasting the number of seconds
 205 # given, or for 1 second by default; uses my script `waveout`
 206 bell() {
 207     local f='sin(880*tau*u) * exp(-10*u)'
 208     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 209 }
 210 
 211 # play a repeating sound with synthetic-bells, lasting the number of seconds
 212 # given, or for 1 second by default; uses my script `waveout`
 213 bells() {
 214     local f="sum(sin(880*tau*v)*exp(-10*v) for v in (u, (u-0.25)%1)) / 2"
 215     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 216 }
 217 
 218 # Breathe Header: add an empty line after the first one (the header), then
 219 # separate groups of 5 lines (by default) with empty lines between them
 220 bh() {
 221     local n="${1:-5}"
 222     [ $# -gt 0 ] && shift
 223     awk -v n="$n" '
 224         BEGIN { if (n == 0) n = -1 }
 225         (NR - 1) % n == 1 && NR > 1 { print "" }
 226         { print; fflush() }
 227     ' "$@"
 228 }
 229 
 230 # recursively find all files with at least the number of bytes given; when
 231 # not given a minimum byte-count, the default is 100 binary megabytes
 232 bigfiles() {
 233     local n
 234     n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 235     [ $# -gt 0 ] && shift
 236 
 237     local arg
 238     for arg in "${@:-.}"; do
 239         if [ ! -d "${arg}" ]; then
 240             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 241             return 1
 242         fi
 243         stdbuf -oL find "${arg}" -type f \( -size "$n"c -o -size +"$n"c \)
 244     done
 245 }
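
# a quick usage sketch for `bigfiles`, with a made-up folder name: recursively
# find all files taking at least 10 binary megabytes (underscores are ignored)
#     bigfiles 10_485_760 ~/Videos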
 246 
 247 # Breathe Lines: separate groups of 5 lines (by default) with empty lines
 248 bl() {
 249     local n="${1:-5}"
 250     [ $# -gt 0 ] && shift
 251     awk -v n="$n" '
 252         BEGIN { if (n == 0) n = -1 }
 253         NR % n == 1 && NR != 1 { print "" }
 254         { print; fflush() }
 255     ' "$@"
 256 }
 257 
 258 # process BLocks/paragraphs of non-empty lines with AWK
 259 # blawk() { awk -F='' -v RS='' "$@"; }
 260 
 261 # process BLocks/paragraphs of non-empty lines with AWK
 262 blawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 263 
 264 # emit a line with a repeating block-like symbol in it
 265 blocks() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -█-g'; }
 266 
 267 # Book-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 268 # my script `bsbs`
 269 bman() {
 270     local w
 271     w="$(tput cols)"
 272     if [ "$w" -gt 100 ]; then
 273         w="$((w / 2 - 1))"
 274     fi
 275     MANWIDTH="$w" man "$@" | bsbs 2
 276 }
 277 
 278 # Begin-Only Awk
 279 # boa() { awk "BEGIN { $1; exit }"; }
 280 
 281 # Begin-Only Awk
 282 boa() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 283 
 284 # Begin-Only AWK
 285 # boawk() { awk "BEGIN { $1; exit }"; }
 286 
 287 # Begin-Only AWK
 288 boawk() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 289 
 290 # BOOK-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 291 # my script `bsbs`
 292 bookman() {
 293     local w
 294     w="$(tput cols)"
 295     if [ "$w" -gt 100 ]; then
 296         w="$((w / 2 - 1))"
 297     fi
 298     MANWIDTH="$w" man "$@" | bsbs 2
 299 }
 300 
 301 # split lines using the regex given, turning them into single-item lines
 302 breakdown() {
 303     local sep="${1:- }"
 304     [ $# -gt 0 ] && shift
 305     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 306 }
 307 
 308 # separate groups of 5 lines (by default) with empty lines
 309 breathe() {
 310     local n="${1:-5}"
 311     [ $# -gt 0 ] && shift
 312     awk -v n="$n" '
 313         BEGIN { if (n == 0) n = -1 }
 314         NR % n == 1 && NR != 1 { print "" }
 315         { print; fflush() }
 316     ' "$@"
 317 }
 318 
 319 # Browse Text
 320 bt() { less -JMKNiCRS "$@"; }
 321 
 322 # show a reverse-sorted tally of all lines read, where ties are sorted
 323 # alphabetically, and where trailing bullets are added to quickly make
 324 # the tally counts comparable at a glance
 325 bully() {
 326     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
 327         # reassure users by instantly showing the header
 328         BEGIN { print "value\ttally\tbullets"; fflush() }
 329 
 330         { gsub(/\r$/, ""); tally[$0]++ }
 331 
 332         END {
 333             # find the max tally, which is needed to build the bullets-string
 334             max = 0
 335             for (k in tally) {
 336                 if (max < tally[k]) max = tally[k]
 337             }
 338 
 339             # make enough bullets for all tallies: this loop makes growing the
 340             # string a task with complexity O(n * log n), instead of a naive
 341             # O(n**2), which can slow things down when tallies are high enough
 342             bullets = "•"
 343             for (n = max; n > 1; n /= 2) {
 344                 bullets = bullets bullets
 345             }
 346 
 347             # emit unsorted output lines to the sort cmd, which will emit the
 348             # final reverse-sorted tally lines
 349             for (k in tally) {
 350                 s = substr(bullets, 1, tally[k])
 351                 printf("%s\t%d\t%s\n", k, tally[k], s) | sort
 352             }
 353         }
 354     ' "$@"
 355 }
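
# a quick usage sketch for `bully`: the pipe below emits its header line, then
# a 3-column TSV tally with `a` (seen 3 times) on top, then `b`, then `c`,
# each with its count and a matching run of bullets
#     printf 'a\nb\na\na\nb\nc\n' | bully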
 356 
 357 # play a busy-phone-line sound lasting the number of seconds given, or for 1
 358 # second by default; uses my script `waveout`
 359 busy() {
 360     # local f='(u < 0.5) * (sin(480*tau * t) + sin(620*tau * t)) / 2'
 361     local f='min(1, exp(-90*(u-0.5))) * (sin(480*tau*t) + sin(620*tau*t)) / 2'
 362     # local f='(sin(350*tau*t) + sin(450*tau*t)) / 2 * min(1, exp(-90*(u-0.5)))'
 363     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 364 }
 365 
 366 # keep all BUT the FIRST (skip) n lines, or skip just the 1st line by default
 367 butfirst() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
 368 
 369 # keep all BUT the LAST n lines, or skip just the last line by default
 370 butlast() { head -n -"${1:-1}" "${2:--}"; }
 371 
 372 # load bytes from the filenames given
 373 bytes() { cat "$@"; }
 374 
 375 # quick alias for `cat`
 376 c() { cat "$@"; }
 377 
 378 # CAlculator with Nice numbers runs my script `ca` and colors results with
 379 # my script `nn`, alternating styles to make long numbers easier to read
 380 can() { ca "$@" | nn --gray; }
 381 
 382 # uppercase the first letter on each line, and lowercase all later letters
 383 capitalize() {
 384     awk '{ print; fflush() }' "$@" | sed -E 's-^(.*)-\L\1-; s-^(.)-\u\1-'
 385 }
 386 
 387 # conCATenate Lines guarantees no lines are ever accidentally joined
 388 # across inputs, always emitting a line-feed at the end of every line
 389 # catl() { awk '{ print; fflush() }' "$@"; }
 390 
 391 # conCATenate Lines ignores leading byte-order marks on first lines, trailing
 392 # carriage-returns, and guarantees no lines are ever accidentally joined
 393 # across inputs, always emitting a line-feed at the end of every line
 394 catl() {
 395     awk '
 396         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 397         { gsub(/\r$/, ""); print; fflush() }
 398     ' "$@"
 399 }
 400 
 401 # Csv AWK: CSV-specific input settings for `awk`
 402 # cawk() { awk --csv "$@"; }
 403 
 404 # Csv AWK: CSV-specific input settings for `awk`
 405 cawk() { stdbuf -oL awk --csv "$@"; }
 406 
 407 # Compile C Stripped
 408 ccs() { cc -Wall -O2 -s -fanalyzer "$@"; }
 409 
 410 # center-align lines of text, using the current screen width
 411 center() {
 412     awk -v width="$(tput cols)" '
 413         {
 414             gsub(/\r$/, "")
 415             lines[NR] = $0
 416             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
 417             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
 418             l = length
 419             if (maxlen < l) maxlen = l
 420         }
 421 
 422         END {
 423             n = (width - maxlen) / 2
 424             if (n % 1) n = n - (n % 1)
 425             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
 426             for (i = 1; i <= NR; i++) printf fmt, "", lines[i]
 427         }
 428     ' "$@"
 429 }
 430 
 431 # Colored Go Test on the folder given; uses my command `gbmawk`
 432 cgt() { go test "${1:-.}" 2>&1 | gbmawk '/^ok/' '/^[-]* ?FAIL/' '/^\?/'; }
 433 
 434 # ignore final line-feed from text, if it's the very last byte; also ignore
 435 # all trailing carriage-returns
 436 choplf() {
 437     awk '
 438         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 439         NR > 1 { print ""; fflush() }
 440         { gsub(/\r$/, ""); printf "%s", $0; fflush() }
 441     ' "$@"
 442 }
 443 
 444 # Color Json using the `jq` app, allowing an optional filepath as the data
 445 # source, and even an optional transformation formula
 446 cj() { jq -C "${2:-.}" "${1:--}"; }
 447 
 448 # clean the screen, after running the command given
 449 # clean() { tput smcup; "$@"; tput rmcup; }
 450 
 451 # show a live digital clock
 452 clock() { watch -n 1 echo 'Press Ctrl + C to quit this clock'; }
 453 
 454 # Colored Live/Line-buffered RipGrep ensures results show up immediately,
 455 # also emitting colors when piped
 456 clrg() { rg --color=always --line-buffered "$@"; }
 457 
 458 # CLear Screen, like the old dos command of the same name
 459 cls() { clear; }
 460 
 461 # COunt COndition: count how many times the AWK expression given is true
 462 coco() {
 463     local cond="${1:-1}"
 464     [ $# -gt 0 ] && shift
 465     awk "
 466         { low = lower = tolower(\$0) }
 467         ${cond} { count++ }
 468         END { print count + 0 }
 469     " "$@"
 470 }
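
# a quick usage sketch for `coco`: the condition given can use the convenience
# variable `low`, a lowercased copy of each line, so the pipe below counts 2
#     printf 'Yes\nno\nYES\n' | coco 'low ~ /yes/'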
 471 
 472 # Colored RipGrep ensures app `rg` emits colors when piped
 473 crg() { rg --color=always --line-buffered "$@"; }
 474 
 475 # emit a line with a repeating cross-like symbol in it
 476 crosses() {
 477     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -×-g'
 478 }
 479 
 480 # split lines using the string given, turning them into single-item lines
 481 crumble() {
 482     local sep="${1:- }"
 483     [ $# -gt 0 ] && shift
 484     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 485 }
 486 
 487 # turn Comma-Separated-Values tables into Tab-Separated-Values tables
 488 csv2tsv() { xsv fmt -t '\t' "$@"; }
 489 
 490 # Change Units turns common US units into international ones; uses my
 491 # scripts `bu` (Better Units) and `nn` (Nice Numbers)
 492 cu() {
 493     bu "$@" | awk '
 494         NF == 5 || (NF == 4 && $NF == "s") { print $(NF-1), $NF }
 495         NF == 4 && $NF != "s" { print $NF }
 496     ' | nn --gray
 497 }
 498 
 499 # CURL Silent spares you the progress bar, but still tells you about errors
 500 curls() { curl --show-error -s "$@"; }
 501 
 502 # Count With AWK: count the times the AWK expression/condition given is true
 503 cwawk() {
 504     local cond="${1:-1}"
 505     [ $# -gt 0 ] && shift
 506     awk "
 507         { low = lower = tolower(\$0) }
 508         ${cond} { count++ }
 509         END { print count + 0 }
 510     " "$@"
 511 }
 512 
 513 # listen to streaming DANCE music
 514 dance() {
 515     printf "streaming \e[7mDance Wave Retro\e[0m\n"
 516     # mpv --quiet https://retro.dancewave.online/retrodance.mp3
 517     mpv --really-quiet https://retro.dancewave.online/retrodance.mp3
 518 }
 519 
 520 # emit a line with a repeating dash-like symbol in it
 521 dashes() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -—-g'; }
 522 
 523 # DEcode BASE64-encoded data, or even base64-encoded data-URIs, by ignoring
 524 # the leading data-URI declaration, if present
 525 debase64() { sed -E 's-^data:.{0,50};base64,--' "${1:--}" | base64 -d; }
 526 
 527 # DECAPitate (lines) emits the first line as is, piping all lines after that
 528 # to the command given, passing all/any arguments/options to it
 529 # decap() {
 530 #     awk -v cmd="$*" 'NR == 1 { print; fflush() } NR > 1 { print | cmd }'
 531 # }
 532 
 533 # turn Comma-Separated-Values tables into tab-separated-values tables
 534 # decsv() { xsv fmt -t '\t' "$@"; }
 535 
 536 # DEDUPlicate prevents lines from appearing more than once
 537 dedup() { awk '!c[$0]++ { print; fflush() }' "$@"; }
 538 
 539 # dictionary-DEFine the word given, using an online service
 540 def() {
 541     local arg
 542     local gap=0
 543     for arg in "$@"; do
 544         [ "${gap}" -gt 0 ] && printf "\n"
 545         gap=1
 546         printf "\e[7m%-80s\x1b[0m\n" "${arg}"
 547         curl -s "dict://dict.org/d:${arg}" | awk '
 548             { gsub(/\r$/, "") }
 549             /^151 / {
 550                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 551                 next
 552             }
 553             /^[1-9][0-9]{2} / {
 554                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 555                 next
 556             }
 557             { print; fflush() }
 558         '
 559     done | less -JMKiCRS
 560 }
 561 
 562 # dictionary-define the word given, using an online service
 563 define() {
 564     local arg
 565     local gap=0
 566     for arg in "$@"; do
 567         [ "${gap}" -gt 0 ] && printf "\n"
 568         gap=1
 569         printf "\e[7m%-80s\x1b[0m\n" "${arg}"
 570         curl -s "dict://dict.org/d:${arg}" | awk '
 571             { gsub(/\r$/, "") }
 572             /^151 / {
 573                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 574                 next
 575             }
 576             /^[1-9][0-9]{2} / {
 577                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 578                 next
 579             }
 580             { print; fflush() }
 581         '
 582     done | less -JMKiCRS
 583 }
 584 
 585 # DEcompress GZip-encoded data
 586 # degz() { zcat "$@"; }
 587 
 588 # turn JSON Lines into a proper json array
 589 dejsonl() { jq -s -M "${@:-.}"; }
 590 
 591 # delay lines from the standard-input, waiting the number of seconds given
 592 # for each line, or waiting 1 second by default
 593 # delay() {
 594 #     local seconds="${1:-1}"
 595 #     (
 596 #         IFS="$(printf "\n")"
 597 #         while read -r line; do
 598 #             sleep "${seconds}"
 599 #             printf "%s\n" "${line}"
 600 #         done
 601 #     )
 602 # }
 603 
 604 # convert lines of Space(s)-Separated Values into lines of tab-separated values
 605 dessv() {
 606     awk '
 607         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 608 
 609         {
 610             gsub(/\r$/, "")
 611             for (i = 1; i <= NF; i++) {
 612                 if (i > 1) printf "\t"
 613                 printf "%s", $i
 614             }
 615             printf "\n"; fflush()
 616         }
 617     ' "$@"
 618 }
 619 
 620 # expand tabs each into up to the number of spaces given, or 4 by default
 621 detab() { expand -t "${1:-4}"; }
 622 
 623 # ignore trailing spaces, as well as trailing carriage returns
 624 detrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
 625 
 626 # turn UTF-16 data into UTF-8
 627 deutf16() { iconv -f utf16 -t utf8 "$@"; }
 628 
 629 # DIVide 2 numbers 3 ways, including the complement
 630 div() {
 631     awk -v a="${1:-1}" -v b="${2:-1}" '
 632         BEGIN {
 633             gsub(/_/, "", a)
 634             gsub(/_/, "", b)
 635             if (a > b) { c = a; a = b; b = c }
 636             c = 1 - a / b
 637             if (0 <= c && c <= 1) printf "%f\n%f\n%f\n", a / b, b / a, c
 638             else printf "%f\n%f\n", a / b, b / a
 639             exit
 640         }'
 641 }
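
# a quick worked example for `div`: `div 3 4` emits 0.750000 (3/4), then
# 1.333333 (4/3), then 0.250000, the complement of the first ratio (1 - 3/4)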
 642 
 643 # get/fetch data from the filename or URI given; named `dog` because dogs can
 644 # `fetch` things for you
 645 # dog() {
 646 #     if [ $# -gt 1 ]; then
 647 #         printf "\e[31mdogs only have 1 mouth to fetch with\e[0m\n" >&2
 648 #         return 1
 649 #     fi
 650 #
 651 #     if [ -e "$1" ]; then
 652 #         cat "$1"
 653 #         return $?
 654 #     fi
 655 #
 656 #     case "${1:--}" in
 657 #         -) cat -;;
 658 #         file://*|https://*|http://*) curl --show-error -s "$1";;
 659 #         ftp://*|ftps://*|sftp://*) curl --show-error -s "$1";;
 660 #         dict://*|telnet://*) curl --show-error -s "$1";;
 661 #         data:*) echo "$1" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 662 #         *) curl --show-error -s "https://$1";;
 663 #     esac 2> /dev/null || {
 664 #         printf "\e[31mcan't fetch %s\e[0m\n" "${1:--}" >&2
 665 #         return 1
 666 #     }
 667 # }
 668 
 669 # emit a line with a repeating dot-like symbol in it
 670 dots() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -·-g'; }
 671 
 672 # ignore/remove all matched regexes given on all stdin lines
 673 drop() {
 674     awk '
 675         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 676         {
 677             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 678             print; fflush()
 679         }
 680     ' "${@:-\r$}"
 681 }
 682 
 683 # show the current Date and Time
 684 dt() {
 685     printf "\e[38;2;78;154;6m%s\e[0m  \e[38;2;52;101;164m%s\e[0m\n" \
 686         "$(date +'%a %b %d')" "$(date +%T)"
 687 }
 688 
 689 # show the current Date, Time, and a Calendar with the 3 `current` months
 690 dtc() {
 691     {
 692         # show the current date/time center-aligned
 693         printf "%20s\e[38;2;78;154;6m%s\e[0m  \e[38;2;52;101;164m%s\e[0m\n\n" \
 694             "" "$(date +'%a %b %d')" "$(date +%T)"
 695         # debian linux has a different `cal` app which highlights the day
 696         if [ -e "/usr/bin/ncal" ]; then
 697             # fix debian/ncal's weird way to highlight the current day
 698             ncal -C -3 | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
 699         else
 700             cal -3
 701         fi
 702     } | less -JMKiCRS
 703 }
 704 
 705 # quick alias for `echo`
 706 e() { echo "$@"; }
 707 
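# expand tabs each into up to 4 spaces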
 708 e4() { expand -t 4 "$@"; }
 709 
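# expand tabs each into up to 8 spaces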
 710 e8() { expand -t 8 "$@"; }
 711 
 712 # Evaluate Awk expression
 713 ea() {
 714     local expr="${1:-0}"
 715     [ $# -gt 0 ] && shift
 716     awk "BEGIN { print ${expr}; exit }" "$@"
 717 }
 718 
 719 # EDit RUN shell commands, using an interactive editor
 720 edrun() { . <( micro -readonly true -filetype shell | leak --inv ); }
 721 
 722 # Extended-mode Grep, enabling its full regex syntax
 723 eg() { grep -E --line-buffered "$@"; }
 724 
 725 # Extended Grep, Recursive Interactive and Plain
 726 # egrip() { ugrep -r -Q --color=never -E "$@"; }
 727 
 728 # show all empty files in a folder, digging recursively
 729 emptyfiles() {
 730     local arg
 731     for arg in "${@:-.}"; do
 732         if [ ! -d "${arg}" ]; then
 733             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 734             return 1
 735         fi
 736         stdbuf -oL find "${arg}" -type f -empty
 737     done
 738 }
 739 
 740 # show all empty folders in a folder, digging recursively
 741 emptyfolders() {
 742     local arg
 743     for arg in "${@:-.}"; do
 744         if [ ! -d "${arg}" ]; then
 745             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 746             return 1
 747         fi
 748         stdbuf -oL find "${arg}" -type d -empty
 749     done
 750 }
 751 
 752 # Evaluate Nodejs expression
 753 # en() {
 754 #     local expr="${1:-null}"
 755 #     expr="$(echo "${expr}" | sed 's-\\-\\\\-g; s-`-\`-g')"
 756 #     node -e "console.log(${expr})" | sed 's-\x1b\[[^A-Za-z]+[A-Za-z]--g'
 757 # }
 758 
 759 # Evaluate Python expression
 760 ep() { python -c "print(${1:-None})"; }
 761 
 762 # Extended Plain Interactive Grep
 763 epig() { ugrep --color=never -Q -E "$@"; }
 764 
 765 # Extended Plain Recursive Interactive Grep
 766 eprig() { ugrep -r --color=never -Q -E "$@"; }
 767 
 768 # Evaluate Ruby expression
 769 # er() { ruby -e "puts ${1:-nil}"; }
 770 
 771 # Edit Run shell commands, using an interactive editor
 772 er() { . <( micro -readonly true -filetype shell | leak --inv ); }
 773 
 774 # ignore/remove all matched regexes given on all stdin lines
 775 erase() {
 776     awk '
 777         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 778         {
 779             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 780             print; fflush()
 781         }
 782     ' "${@:-\r$}"
 783 }
 784 
 785 # Editor Read-Only
 786 ero() { micro -readonly true "$@"; }
 787 
 788 # Extended-mode Sed, enabling its full regex syntax
 789 es() { sed -E -u "$@"; }
 790 
 791 # Expand Tabs each into up to the number of spaces given, or 4 by default
 792 et() { expand -t "${1:-4}"; }
 793 
 794 # convert EURos into CAnadian Dollars, using the latest official exchange
 795 # rates from the bank of canada; during weekends, the latest rate may be
 796 # from a few days ago; the default amount of euros to convert is 1, when
 797 # not given
 798 eur2cad() {
 799     local site='https://www.bankofcanada.ca/valet/observations/group'
 800     local csv_rates="${site}/FX_RATES_DAILY/csv"
 801     local url
 802     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
 803     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
 804         /EUR/ { for (i = 1; i <= NF; i++) if($i ~ /EUR/) j = i }
 805         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
 806 }
 807 
 808 # EValuate AWK expression
 809 evawk() {
 810     local expr="${1:-0}"
 811     [ $# -gt 0 ] && shift
 812     awk "BEGIN { print ${expr}; exit }" "$@"
 813 }
 814 
 815 # get various currency EXchange RATES
 816 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/$1"; }
 817 
 818 # get various currency EXchange RATES
 819 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-USD}"; }
 820 
 821 # get various currency EXchange RATES
 822 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-EUR}"; }
 823 
 824 # get various currency EXchange RATES
 825 exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-CAD}"; }
 826 
 827 # convert fahrenheit into celsius
 828 fahrenheit() {
 829     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' |
 830         awk '/./ { printf "%.2f\n", ($0 - 32) * 5.0/9.0 }'
 831 }
 832 
 833 # Flushed AWK
 834 fawk() { stdbuf -oL awk "$@"; }
 835 
 836 # fetch/web-request all URIs given, using protocol HTTPS when none is given
 837 fetch() {
 838     local a
 839     for a in "$@"; do
 840         case "$a" in
 841             file://*|https://*|http://*) curl --show-error -s "$a";;
 842             ftp://*|ftps://*|sftp://*) curl --show-error -s "$a";;
 843             dict://*|telnet://*) curl --show-error -s "$a";;
 844             data:*) echo "$a" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 845             *) curl --show-error -s "https://$a";;
 846         esac
 847     done
 848 }
 849 
 850 # run the Fuzzy Finder (fzf) in multi-choice mode, with custom keybindings
 851 ff() { fzf -m --bind ctrl-a:select-all,ctrl-space:toggle "$@"; }
 852 
 853 # show all files in a folder, digging recursively
 854 files() {
 855     local arg
 856     for arg in "${@:-.}"; do
 857         if [ ! -d "${arg}" ]; then
 858             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 859             return 1
 860         fi
 861         stdbuf -oL find "${arg}" -type f
 862     done
 863 }
 864 
 865 # recursively find all files with fewer bytes than the number given
 866 filesunder() {
 867     local n
 868     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 869     [ $# -gt 0 ] && shift
 870 
 871     local arg
 872     for arg in "${@:-.}"; do
 873         if [ ! -d "${arg}" ]; then
 874             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 875             return 1
 876         fi
 877         stdbuf -oL find "${arg}" -type f -size -"$n"c
 878     done
 879 }
 880 
 881 # get the first n lines, or 1 by default
 882 first() { head -n "${1:-1}" "${2:--}"; }
 883 
 884 # limit data up to the first n bytes
 885 firstbytes() { head -c "$1" "${2:--}"; }
 886 
 887 # get the first n lines, or 1 by default
 888 firstlines() { head -n "${1:-1}" "${2:--}"; }
 889 
 890 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
 891 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
 892 # and ensuring each input's last line ends with a line-feed
 893 fixlines() {
 894     awk '
 895         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 896         { gsub(/\r$/, ""); print; fflush() }
 897     ' "$@"
 898 }
 899 
 900 # FLushed AWK
 901 # flawk() { stdbuf -oL awk "$@"; }
 902 
 903 # First Line AWK emits the first line as is, then runs the AWK code given
 904 # as the first argument on all later lines, passing any remaining arguments
 905 # along to `awk` as given
 906 flawk() {
 907     local code="${1:-1}"
 908     [ $# -gt 0 ] && shift
 909     stdbuf -oL awk "NR == 1 { print; fflush(); next } ${code}" "$@"
 910 }
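
# a quick usage sketch for `flawk`, with a made-up filter: keep the header
# line from `ps`, along with only the lines which mention `bash`
#     ps aux | flawk '/bash/'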
 911 
 912 # Faint LEAK emits/tees input both to stdout and stderr, coloring gray what
 913 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
 914 # involving several steps
 915 fleak() {
 916     awk '
 917         {
 918             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
 919             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0 > "/dev/stderr"
 920             print; fflush()
 921         }
 922     ' "$@"
 923 }
 924 
 925 # try to run the command given using line-buffering for its (standard) output
 926 flushlines() { stdbuf -oL "$@"; }
 927 
 928 # show all folders in a folder, digging recursively
 929 folders() {
 930     local arg
 931     for arg in "${@:-.}"; do
 932         if [ ! -d "${arg}" ]; then
 933             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 934             return 1
 935         fi
 936         stdbuf -oL find "${arg}" -type d | awk '!/^\.$/ { print; fflush() }'
 937     done
 938 }
 939 
 940 # start from the line number given, skipping all previous ones
 941 fromline() { tail -n +"${1:-1}" "${2:--}"; }
 942 
 943 # convert FeeT into meters
 944 ft() {
 945     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 946         awk '/./ { printf "%.2f\n", 0.3048 * $0; fflush() }'
 947 }
 948 
 949 # convert FeeT² (squared) into meters²
 950 ft2() {
 951     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 952         awk '/./ { printf "%.2f\n", 0.09290304 * $0 }'
 953 }
 954 
 955 # Get/fetch data from the filenames/URIs given; uses my script `get`
 956 # g() { get "$@"; }
 957 
 958 # run `grep` in extended-regex mode, enabling its full regex syntax
 959 # g() { grep -E --line-buffered "$@"; }
 960 
 961 # convert GALlons into liters
 962 gal() {
 963     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 964         awk '/./ { printf "%.2f\n", 3.785411784 * $0; fflush() }'
 965 }
 966 
 967 # convert binary GigaBytes into bytes
 968 gb() {
 969     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 970         awk '/./ { printf "%.4f\n", 1073741824 * $0; fflush() }' |
 971         sed 's-\.00*$--'
 972 }
 973 
 974 # glue/stick together various lines, only emitting a line-feed at the end; an
 975 # optional argument is the output-item-separator, which is empty by default
 976 glue() {
 977     local sep="${1:-}"
 978     [ $# -gt 0 ] && shift
 979     awk -v sep="${sep}" '
 980         NR > 1 { printf "%s", sep }
 981         { gsub(/\r/, ""); printf "%s", $0; fflush() }
 982         END { if (NR > 0) print ""; fflush() }
 983     ' "$@"
 984 }
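
# a quick usage sketch for `glue`: the pipe below joins the lines 1, 2, and 3
# into the single output line `1,2,3`
#     seq 3 | glue ,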
 985 
 986 # GO Build Stripped: a common use-case for the go compiler
 987 gobs() { go build -ldflags "-s -w" -trimpath "$@"; }
 988 
 989 # GO DEPendencieS: show all dependencies in a go project
 990 godeps() { go list -f '{{ join .Deps "\n" }}' "$@"; }
 991 
 992 # GO IMPortS: show all imports in a go project
 993 goimps() { go list -f '{{ join .Imports "\n" }}' "$@"; }
 994 
 995 # go to the folder picked using an interactive TUI; uses my script `bf`
 996 goto() {
 997     local where
 998     where="$(bf "${1:-.}")"
 999     if [ $? -ne 0 ]; then
1000         return 0
1001     fi
1002 
1003     where="$(realpath "${where}")"
1004     if [ ! -d "${where}" ]; then
1005         where="$(dirname "${where}")"
1006     fi
1007     cd "${where}" || return
1008 }
1009 
1010 # GRayed-out lines with AWK
1011 grawk() {
1012     local cond="${1:-1}"
1013     [ $# -gt 0 ] && shift
1014     awk "${cond}"' {
1015             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m")
1016             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush()
1017             next
1018         }
1019         { print; fflush() }
1020     ' "$@"
1021 }
1022 
1023 # Style lines using a GRAY-colored BACKground
1024 grayback() {
1025     awk '
1026         {
1027             gsub(/\x1b\[0m/, "\x1b[0m\x1b[48;2;218;218;218m")
1028             printf "\x1b[48;2;218;218;218m%s\x1b[0m\n", $0; fflush()
1029         }
1030     ' "$@"
1031 }
1032 
1033 # Grep, Recursive Interactive and Plain
1034 # grip() { ugrep -r -Q --color=never -E "$@"; }
1035 
1036 # Global extended regex SUBstitute, using the AWK function of the same name:
1037 # arguments are used as regex/replacement pairs, in that order
1038 gsub() {
1039     awk '
1040         BEGIN {
1041             for (i = 1; i < ARGC; i++) {
1042                 args[++n] = ARGV[i]
1043                 delete ARGV[i]
1044             }
1045         }
1046         {
1047             for (i = 1; i <= n; i += 2) gsub(args[i], args[i + 1])
1048             print; fflush()
1049         }
1050     ' "$@"
1051 }
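
# a quick usage sketch for `gsub`, with made-up regex/replacement pairs: the
# pipe below turns every digit into an `X`, then squeezes runs of spaces,
# emitting `total XX bytes`
#     echo 'total  42 bytes' | gsub '[0-9]' 'X' ' +' ' '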
1052 
1053 # show Help laid out on 2 side-by-side columns; uses my script `bsbs`
1054 h2() { naman "$@" | bsbs 2; }
1055 
1056 # Highlight (lines) with AWK
1057 hawk() {
1058     local cond="${1:-1}"
1059     [ $# -gt 0 ] && shift
1060     awk '
1061         { low = lower = tolower($0) }
1062         '"${cond}"' {
1063             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1064             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1065             next
1066         }
1067         { print; fflush() }
1068     ' "$@"
1069 }
1070 
1071 # play a heartbeat-like sound lasting the number of seconds given, or for 1
1072 # second by default; uses my script `waveout`
1073 heartbeat() {
1074     local a='sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1])'
1075     local b='((12, u), (8, (u-0.25)%1))'
1076     local f="sum($a for v in $b) / 2"
1077     # local f='sum(sin(10*tau*exp(-20*v))*exp(-2*v) for v in (u, (u-0.25)%1))/2'
1078     # local f='sum(sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1]) for v in ((12, u), (8, (u-0.25)%1)))/2'
1079     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1080 }
1081 
1082 # Highlighted-style ECHO
1083 hecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1084 
1085 # show each byte as a pair of HEXadecimal (base-16) symbols
1086 hexify() {
1087     cat "$@" | od -x -A n |
1088         awk '{ gsub(/ +/, ""); printf "%s", $0; fflush() } END { printf "\n" }'
1089 }
1090 
1091 # HIghlighted-style ECHO
1092 hiecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1093 
1094 # highlight lines
1095 highlight() {
1096     awk '
1097         {
1098             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1099             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1100         }
1101     ' "$@"
1102 }
1103 
1104 # HIghlight LEAK emits/tees input both to stdout and stderr, highlighting what
1105 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
1106 # involving several steps
1107 hileak() {
1108     awk '
1109         {
1110             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
1111             printf "\x1b[7m%s\x1b[0m\n", $0 > "/dev/stderr"
1112             print; fflush()
1113         }
1114     ' "$@"
1115 }
1116 
1117 # highlight lines
1118 hilite() {
1119     awk '
1120         {
1121             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1122             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1123         }
1124     ' "$@"
1125 }
1126 
1127 # Help Me Remember my custom shell commands
1128 hmr() {
1129     local cmd="bat"
1130     # debian linux uses a different name for the `bat` app
1131     if [ -e "/usr/bin/batcat" ]; then
1132         cmd="batcat"
1133     fi
1134 
1135     "$cmd" \
1136         --style=plain,header,numbers --theme='Monokai Extended Light' \
1137         --wrap=never --color=always "$(which clam)" |
1138             sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' | less -JMKiCRS
1139 }
1140 
1141 # convert seconds into a colon-separated Hours-Minutes-Seconds triple
1142 hms() {
1143     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | awk '/./ {
1144         x = $0
1145         h = (x - x % 3600) / 3600
1146         m = (x % 3600) / 60
1147         s = x % 60
1148         printf "%02d:%02d:%05.2f\n", h, m, s; fflush()
1149     }'
1150 }
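
# a quick worked example for `hms`: 3661.5 seconds are 1 hour, 1 minute, and
# 1.5 seconds, so `hms 3661.5` emits 01:01:01.50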
1151 
1152 # find all hyperlinks inside HREF attributes in the input text
1153 href() {
1154     awk '
1155         BEGIN { e = "href=\"[^\"]+\"" }
1156         {
1157             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1158                 print substr(s, RSTART + 6, RLENGTH - 7); fflush()
1159             }
1160         }
1161     ' "$@"
1162 }
1163 
1164 # Index all lines starting from 0, using a tab right after each line number
1165 # i() {
1166 #     local start="${1:-0}"
1167 #     [ $# -gt 0 ] && shift
1168 #     nl -b a -w 1 -v "${start}" "$@"
1169 # }
1170 
1171 # Index all lines starting from 0, using a tab right after each line number
1172 i() { stdbuf -oL nl -b a -w 1 -v 0 "$@"; }
1173 
1174 # avoid/ignore lines which case-insensitively match any of the regexes given
1175 iavoid() {
1176     awk '
1177         BEGIN {
1178             if (IGNORECASE == "") {
1179                 m = "this variant of AWK lacks case-insensitive regex-matching"
1180                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1181                 exit 125
1182             }
1183             IGNORECASE = 1
1184 
1185             for (i = 1; i < ARGC; i++) {
1186                 e[i] = ARGV[i]
1187                 delete ARGV[i]
1188             }
1189         }
1190 
1191         {
1192             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
1193             print; fflush(); got++
1194         }
1195 
1196         END { exit(got == 0) }
1197     ' "${@:-^\r?$}"
1198 }
1199 
1200 # case-Insensitively DEDUPlicate prevents lines from appearing more than once
1201 idedup() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1202 
1203 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1204 idrop() {
1205     awk '
1206         BEGIN {
1207             if (IGNORECASE == "") {
1208                 m = "this variant of AWK lacks case-insensitive regex-matching"
1209                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1210                 exit 125
1211             }
1212             IGNORECASE = 1
1213 
1214             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1215         }
1216 
1217         {
1218             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1219             print; fflush()
1220         }
1221     ' "${@:-\r$}"
1222 }
1223 
1224 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1225 ierase() {
1226     awk '
1227         BEGIN {
1228             if (IGNORECASE == "") {
1229                 m = "this variant of AWK lacks case-insensitive regex-matching"
1230                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1231                 exit 125
1232             }
1233             IGNORECASE = 1
1234 
1235             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1236         }
1237 
1238         {
1239             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1240             print; fflush()
1241         }
1242     ' "${@:-\r$}"
1243 }
1244 
1245 # ignore command in a pipe: this allows quick re-editing of pipes, while
1246 # still leaving signs of previously-used steps, as a memo
1247 ignore() { cat; }
1248 
1249 # only keep lines which case-insensitively match any of the regexes given
1250 imatch() {
1251     awk '
1252         BEGIN {
1253             if (IGNORECASE == "") {
1254                 m = "this variant of AWK lacks case-insensitive regex-matching"
1255                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1256                 exit 125
1257             }
1258             IGNORECASE = 1
1259 
1260             for (i = 1; i < ARGC; i++) {
1261                 e[i] = ARGV[i]
1262                 delete ARGV[i]
1263             }
1264         }
1265 
1266         {
1267             for (i = 1; i < ARGC; i++) {
1268                 if ($0 ~ e[i]) {
1269                     print; fflush()
1270                     got++
1271                     next
1272                 }
1273             }
1274         }
1275 
1276         END { exit(got == 0) }
1277     ' "${@:-[^\r]}"
1278 }
1279 
1280 # start each non-empty line with extra n spaces
1281 indent() {
1282     awk '
1283         BEGIN {
1284             n = ARGV[1] + 0
1285             delete ARGV[1]
1286             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
1287         }
1288 
1289         /^\r?$/ { print ""; fflush(); next }
1290         { gsub(/\r$/, ""); printf(fmt, "", $0); fflush() }
1291     ' "$@"
1292 }
1293 
1294 # listen to INTENSE streaming radio
1295 intense() {
1296     printf "streaming \e[7mIntense Radio\e[0m\n"
1297     mpv --quiet https://secure.live-streams.nl/flac.flac
1298 }
1299 
1300 # show public-IP-related INFOrmation
1301 # ipinfo() { curl -s ipinfo.io; }
1302 
1303 # show public-IP-related INFOrmation
1304 ipinfo() { curl -s ipinfo.io | jq; }
1305 
1306 # emit each word-like item from each input line on its own line; when a file
1307 # has tabs on its first line, items are split using tabs alone, which allows
1308 # items to have spaces in them
1309 items() {
1310     awk '
1311         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
1312         { gsub(/\r$/, ""); for (i = 1; i <= NF; i++) print $i; fflush() }
1313     ' "$@"
1314 }
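
# a quick usage sketch for `items`: the pipe below emits 3 lines, since its
# input line has no tabs and so splits on spaces; a tab-separated input line
# would instead split on tabs alone, keeping spaces inside items
#     echo 'one two three' | items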
1315 
1316 # case-insensitively deduplicate lines, keeping them in their original order:
1317 # the checking/matching is case-insensitive, but each first match is output
1318 # exactly as is
1319 iunique() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1320 
1321 # shrink/compact Json data, allowing an optional filepath
1322 # j0() { python -m json.tool --compact "${1:--}"; }
1323 
1324 # shrink/compact Json using the `jq` app, allowing an optional filepath, and
1325 # even an optional transformation formula after that
1326 # j0() { jq -c -M "${2:-.}" "${1:--}"; }
1327 
1328 # show Json data on multiple lines, using 2 spaces for each indentation level,
1329 # allowing an optional filepath
1330 # j2() { python -m json.tool --indent 2 "${1:--}"; }
1331 
1332 # show Json data on multiple lines, using 2 spaces for each indentation level,
1333 # allowing an optional filepath, and even an optional transformation formula
1334 # after that
1335 # j2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1336 
1337 # listen to streaming JAZZ music
1338 jazz() {
1339     printf "streaming \e[7mSmooth Jazz Instrumental\e[0m\n"
1340     # mpv https://stream.zeno.fm/00rt0rdm7k8uv
1341     mpv --quiet https://stream.zeno.fm/00rt0rdm7k8uv
1342 }
1343 
1344 # show a `dad` JOKE from the web, sometimes even a very funny one
1345 # joke() {
1346 #     curl -s https://icanhazdadjoke.com | fold -s | sed -E 's- *\r?$--'
1347 #     # plain-text output from previous cmd doesn't end with a line-feed
1348 #     printf "\n"
1349 # }
1350 
1351 # show a `dad` JOKE from the web, sometimes even a very funny one
1352 joke() {
1353     curl --show-error -s https://icanhazdadjoke.com | fold -s |
1354         awk '{ gsub(/ *\r?$/, ""); print }'
1355 }
1356 
1357 # shrink/compact JSON data, allowing an optional filepath
1358 # json0() { python -m json.tool --compact "${1:--}"; }
1359 
1360 # shrink/compact JSON using the `jq` app, allowing an optional filepath, and
1361 # even an optional transformation formula after that
1362 json0() { jq -c -M "${2:-.}" "${1:--}"; }
1363 
1364 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1365 # allowing an optional filepath
1366 # json2() { python -m json.tool --indent 2 "${1:--}"; }
1367 
1368 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1369 # allowing an optional filepath, and even an optional transformation formula
1370 # after that
1371 json2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1372 
1373 # turn JSON Lines into a proper JSON array
1374 jsonl2json() { jq -s -M "${@:-.}"; }
1375 
1376 # emit the given number of random/junk bytes, or 1024 junk bytes by default
1377 junk() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" /dev/urandom; }
1378 
1379 # only keep the file-extension part from lines ending with file-extensions
1380 # justext() {
1381 #     awk '
1382 #         !/^\./ && /\./ { gsub(/^.+\.+/, ""); printf ".%s\n", $0; fflush() }
1383 #     ' "$@"
1384 # }
1385 
1386 # only keep the file-extension part from lines ending with file-extensions
1387 justext() {
1388     awk '
1389         !/^\./ && /\./ {
1390             if (match($0, /((\.[A-Za-z0-9]+)+) *\r?$/)) {
1391                 print substr($0, RSTART, RLENGTH); fflush()
1392             }
1393         }
1394     ' "$@"
1395 }
1396 
1397 # only keep lines ending with a file-extension of any popular picture format
1398 justpictures() {
1399     awk '
1400         /.\.(bmp|gif|heic|ico|jfif|jpe?g|png|svg|tiff?|webp) *\r?$/ {
1401             gsub(/ *\r?$/, ""); print; fflush()
1402         }
1403     ' "$@"
1404 }
1405 
1406 # only keep lines ending with a file-extension of any popular sound format
1407 justsounds() {
1408     awk '
1409         /.\.(aac|aif[cf]?|au|flac|m4a|m4b|mp[23]|ogg|snd|wav|wma) *\r?$/ {
1410             gsub(/ *\r?$/, ""); print; fflush()
1411         }
1412     ' "$@"
1413 }
1414 
1415 # only keep lines ending with a file-extension of any popular video format
1416 justvideos() {
1417     awk '
1418         /.\.(avi|mkv|mov|mp4|mpe?g|ogv|webm|wmv) *\r?$/ {
1419             gsub(/ *\r?$/, ""); print; fflush()
1420         }
1421     ' "$@"
1422 }
1423 
1424 # convert binary KiloBytes into bytes
1425 kb() {
1426     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1427         awk '/./ { printf "%.2f\n", 1024 * $0; fflush() }' |
1428         sed 's-\.00*$--'
1429 }
1430 
1431 # run `less`, showing line numbers, among other settings
1432 l() { less -JMKNiCRS "$@"; }
1433 
1434 # Like A Book groups lines as 2 side-by-side pages, the same way books
1435 # do it; uses my script `book`
1436 lab() { book "$(($(tput lines) - 1))" "$@" | less -JMKiCRS; }
1437 
1438 # find the LAN (local-area network) IP address for this device
1439 lanip() { hostname -I; }
1440 
1441 # Line xARGS: `xargs` using line separators, which handles filepaths
1442 # with spaces, as long as the standard input has 1 path per line
1443 # largs() { tr -d '\r' | tr '\n' '\000' xargs -0 "$@"; }
1444 
1445 # Line xARGS: `xargs` using line separators, which handles filepaths
1446 # with spaces, as long as the standard input has 1 path per line
1447 largs() {
1448     awk -v ORS='\000' '
1449         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1450         { gsub(/\r$/, ""); print; fflush() }
1451     ' | xargs -0 "$@"
1452 }
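
# a quick usage sketch for `largs`, using the `files` function defined above:
# count the bytes in each file found, even when filepaths have spaces in them
#     files . | largs wc -c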
1453 
1454 # get the last n lines, or 1 by default
1455 last() { tail -n "${1:-1}" "${2:--}"; }
1456 
1457 # get up to the last given number of bytes
1458 lastbytes() { tail -c "${1:-1}" "${2:--}"; }
1459 
1460 # get the last n lines, or 1 by default
1461 lastlines() { tail -n "${1:-1}" "${2:--}"; }
1462 
1463 # turn UTF-8 into its latin-like subset, where variants of latin letters stay
1464 # as given, and where all other symbols become question marks, one question
1465 # mark for each code-point byte
1466 latinize() {
1467     iconv -f utf-8 -t latin-1//translit "$@" | iconv -f latin-1 -t utf-8
1468 }
1469 
1470 # Lowercased (lines) AWK
1471 lawk() {
1472     local code="${1:-1}"
1473     [ $# -gt 0 ] && shift
1474     awk "
1475         {
1476             line = orig = original = \$0
1477             low = lower = tolower(\$0)
1478             \$0 = lower
1479         }
1480         ${code}
1481         { fflush() }
1482     " "$@";
1483 }
1484 
1485 # convert pounds (LB) into kilograms
1486 lb() {
1487     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1488         awk '/./ { printf "%.2f\n", 0.45359237 * $0; fflush() }'
1489 }
1490 
1491 # turn the first n space-separated fields on each line into tab-separated
1492 # ones; this behavior is useful to make the output of many cmd-line tools
1493 # into TSV, since filenames are usually the last fields, and these may
1494 # contain spaces which aren't meant to be split into different fields
1495 leadtabs() {
1496     local n="$1"
1497     local cmd
1498     cmd="$([ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "")"
1499     cmd="s-^ *--; s- *\\r?\$--; $(echo "${cmd}" | sed 's/ /s- +-\\t-1;/g')"
1500     sed -u -E "${cmd}"
1501 }
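
# a quick usage sketch for `leadtabs`: the pipe below makes `wc` output into
# 2-column TSV lines, even when the filenames being counted have spaces
#     wc -c ./* | leadtabs 1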
1502 
1503 # run `less`, showing line numbers, among other settings
1504 least() { less -JMKNiCRS "$@"; }
1505 
1506 # limit stops at the first n bytes, or 1024 bytes by default
1507 limit() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" "${2:--}"; }
1508 
1509 # Less with Header runs `less` with line numbers, ANSI styles, no line-wraps,
1510 # and using the first n lines as a sticky-header (1 by default), so they
1511 # always show on top
1512 lh() {
1513     local n="${1:-1}"
1514     [ $# -gt 0 ] && shift
1515     less --header="$n" -JMKNiCRS "$@"
1516 }
1517 
1518 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
1519 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
1520 # and ensuring each input's last line ends with a line-feed
1521 lines() {
1522     awk '
1523         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1524         { gsub(/\r$/, ""); print; fflush() }
1525     ' "$@"
1526 }
1527 
1528 # regroup adjacent lines into n-item tab-separated lines
1529 lineup() {
1530     local n="${1:-0}"
1531     [ $# -gt 0 ] && shift
1532 
1533     if [ "$n" -le 0 ]; then
1534         awk '
1535             NR > 1 { printf "\t" }
1536             { printf "%s", $0; fflush() }
1537             END { if (NR > 0) print "" }
1538         ' "$@"
1539         return $?
1540     fi
1541 
1542     awk -v n="$n" '
1543         NR % n != 1 && n > 1 { printf "\t" }
1544         { printf "%s", $0; fflush() }
1545         NR % n == 0 { print ""; fflush() }
1546         END { if (NR % n != 0) print "" }
1547     ' "$@"
1548 }
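
# a quick usage sketch for `lineup`: the pipe below regroups the lines 1 to 6
# into 2 tab-separated lines of 3 items each
#     seq 6 | lineup 3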
1549 
1550 # find all hyperLINKS (https:// and http://) in the input text
1551 links() {
1552     awk '
1553         BEGIN { e = "https?://[A-Za-z0-9+_.:%-]+(/[A-Za-z0-9+_.%/,#?&=-]*)*" }
1554         {
1555             # match all links in the current line
1556             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1557                 print substr(s, RSTART, RLENGTH); fflush()
1558             }
1559         }
1560     ' "$@"
1561 }
1562 
1563 # List files, using the `Long` option
1564 # ll() { ls -l "$@"; }
1565 
1566 # LOAD data from the filename or URI given; uses my script `get`
1567 load() { get "$@"; }
1568 
1569 # LOwercase line, check (awk) COndition: on each success, the original line
1570 # is output with its original letter-casing, as its lower-cased version is
1571 # only a convenience meant for the condition
1572 loco() {
1573     local cond="${1:-1}"
1574     [ $# -gt 0 ] && shift
1575     awk "
1576         {
1577             line = orig = original = \$0
1578             low = lower = tolower(\$0)
1579             \$0 = lower
1580         }
1581         ${cond} { print line; fflush() }
1582     " "$@"
1583 }
1584 
1585 # LOcal SERver webserves files in a folder as localhost, using the port
1586 # number given, or port 8080 by default
1587 loser() {
1588     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
1589     python3 -m http.server "${1:-8080}" -d "${2:-.}"
1590 }
1591 
1592 # LOWercase all ASCII symbols
1593 low() { awk '{ print tolower($0); fflush() }' "$@"; }
1594 
1595 # LOWERcase all ASCII symbols
1596 lower() { awk '{ print tolower($0); fflush() }' "$@"; }
1597 
1598 # Live/Line-buffered RipGrep ensures results show/pipe up immediately
1599 lrg() { rg --line-buffered "$@"; }
1600 
1601 # Listen To Youtube
1602 lty() {
1603     local url
1604     # some youtube URIs end with extra playlist/tracker parameters
1605     url="$(echo "$1" | sed 's-&.*--')"
1606     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
1607 }
1608 
1609 # only keep lines which match any of the regexes given
1610 match() {
1611     awk '
1612         BEGIN {
1613             for (i = 1; i < ARGC; i++) {
1614                 e[i] = ARGV[i]
1615                 delete ARGV[i]
1616             }
1617         }
1618 
1619         {
1620             for (i = 1; i < ARGC; i++) {
1621                 if ($0 ~ e[i]) {
1622                     print; fflush()
1623                     got++
1624                     next
1625                 }
1626             }
1627         }
1628 
1629         END { exit(got == 0) }
1630     ' "${@:-[^\r]}"
1631 }
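# usage sketch for `match`, with assumed sample input: lines matching either
# regex are kept, so the call below emits "cat" and "bird", and exits with 0
# printf "cat\ndog\nbird\n" | match '^c' 'bird'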
1632 
1633 # MAX Width truncates lines to the number of symbols/bytes given, or up
1634 # to 80 by default; output lines end with an ANSI reset-code, in case input
1635 # lines use ANSI styles
1636 maxw() {
1637     local maxwidth="${1:-80}"
1638     [ $# -gt 0 ] && shift
1639     awk -v maxw="${maxwidth}" '
1640         {
1641             gsub(/\r$/, "")
1642             printf("%s\x1b[0m\n", substr($0, 1, maxw)); fflush()
1643         }
1644     ' "$@"
1645 }
1646 
1647 # convert binary MegaBytes into bytes
1648 mb() {
1649     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1650         awk '/./ { printf "%.2f\n", 1048576 * $0; fflush() }' |
1651         sed 's-\.00*$--'
1652 }
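# usage sketch for `mb`: 2 binary megabytes are 2 * 1048576 = 2097152 bytes
# mb 2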
1653 
1654 # Multi-Core MAKE runs `make` using all cores
1655 mcmake() { make -j "$(nproc)" "$@"; }
1656 
1657 # Multi-Core MaKe runs `make` using all cores
1658 mcmk() { make -j "$(nproc)" "$@"; }
1659 
1660 # merge stderr into stdout, without any ugly keyboard-dancing
1661 # merrge() { "$@" 2>&1; }
1662 
1663 # convert MIles into kilometers
1664 mi() {
1665     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1666         awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }'
1667 }
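# usage sketch for `mi`: 3 miles show as 4.83 km, since 3 * 1.609344 = 4.828032
# mi 3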
1668 
1669 # convert MIles² (squared) into kilometers²
1670 mi2() {
1671     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1672         awk '/./ { printf "%.2f\n", 2.5899881103360 * $0 }'
1673 }
1674 
1675 # Make In Folder
1676 mif() {
1677     local code
1678     pushd "${1:-.}" > /dev/null || return
1679     [ $# -gt 0 ] && shift
1680     make "$@"
1681     code=$?
1682     popd > /dev/null || return "${code}"
1683     return "${code}"
1684 }
1685 
1686 # Media INFO
1687 # minfo() { mediainfo "$@" | less -JMKiCRS; }
1688 
1689 # Media INFO
1690 # minfo() { ffprobe "$@" |& less -JMKiCRS; }
1691 
1692 # run `make`
1693 mk() { make "$@"; }
1694 
1695 # convert Miles Per Hour into kilometers per hour
1696 mph() {
1697     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1698         awk '/./ { printf "%.2f\n", 1.609344 * $0 }'
1699 }
1700 
1701 # Number all lines, using a tab right after each line number
1702 # n() {
1703 #     local start="${1:-1}"
1704 #     [ $# -gt 0 ] && shift
1705 #     nl -b a -w 1 -v "${start}" "$@"
1706 # }
1707 
1708 # Number all lines, using a tab right after each line number
1709 n() { stdbuf -oL nl -b a -w 1 -v 1 "$@"; }
1710 
1711 # NArrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1712 naman() {
1713     local w
1714     w="$(tput cols)"
1715     if [ "$w" -gt 100 ]; then
1716         w="$((w / 2 - 1))"
1717     fi
1718     MANWIDTH="$w" man "$@"
1719 }
1720 
1721 # Not AND sorts its 2 inputs, then finds lines not in common
1722 nand() {
1723     # comm -3 <(sort "$1") <(sort "$2")
1724     # dash doesn't support the process-sub syntax
1725     (sort "$1" | (sort "$2" | (comm -3 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
1726 }
1727 
1728 # Nice Byte Count, using my scripts `nn` and `cext`
1729 nbc() { wc -c "$@" | nn --gray | cext; }
1730 
1731 # listen to streaming NEW WAVE music
1732 newwave() {
1733     printf "streaming \e[7mNew Wave radio\e[0m\n"
1734     mpv --quiet https://puma.streemlion.com:2910/stream
1735 }
1736 
1737 # NIce(r) COlumns makes the output of the many commands which start with
1738 # a header line easier to read; uses my script `nn`
1739 nico() {
1740     awk '
1741         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1742         { printf "%5d  %s\n", NR - 1, $0; fflush() }
1743     ' "$@" | nn --gray | less -JMKiCRS
1744 }
1745 
1746 # emit nothing to output and/or discard everything from input
1747 nil() {
1748     if [ $# -gt 0 ]; then
1749         "$@" > /dev/null
1750     else
1751         cat < /dev/null
1752     fi
1753 }
1754 
1755 # pipe-run my scripts `nj` (Nice Json) and `nn` (Nice Numbers)
1756 njnn() { nj "$@" | nn --gray; }
1757 
1758 # Narrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1759 nman() {
1760     local w
1761     w="$(tput cols)"
1762     if [ "$w" -gt 100 ]; then
1763         w="$((w / 2 - 1))"
1764     fi
1765     MANWIDTH="$w" man "$@"
1766 }
1767 
1768 # convert Nautical MIles into kilometers
1769 nmi() {
1770     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1771         awk '/./ { printf "%.2f\n", 1.852 * $0; fflush() }'
1772 }
1773 
1774 # NO (standard) ERRor ignores stderr, without any ugly keyboard-dancing
1775 # noerr() { "$@" 2> /dev/null; }
1776 
1777 # play a white-noise sound lasting the number of seconds given, or for 1
1778 # second by default; uses my script `waveout`
1779 noise() { waveout "${1:-1}" "${2:-0.05} * random()" | mpv --really-quiet -; }
1780 
1781 # ignore trailing spaces, as well as trailing carriage returns
1782 notrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
1783 
1784 # show the current date and time
1785 now() { date +'%Y-%m-%d %H:%M:%S'; }
1786 
1787 # Nice Processes shows/lists all current processes; uses my script `nn`
1788 np() {
1789     local res
1790     local code
1791     # res="$(ps "${@:-auxf}")"
1792     res="$(ps "${@:-aux}")"
1793     code=$?
1794     if [ "${code}" -ne 0 ]; then
1795         return "${code}"
1796     fi
1797 
1798     echo "${res}" | awk '
1799         BEGIN {
1800             d = strftime("%a %b %d")
1801             t = strftime("%H:%M:%S")
1802             printf "\x1b[7m%30s%s  %s%30s\x1b[0m\n\n", "", d, t, ""
1803         }
1804 
1805         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1806 
1807         $1 == "root" {
1808             gsub(/^/, "\x1b[38;2;52;101;164m")
1809             gsub(/ +/, "&\x1b[0m\x1b[38;2;52;101;164m")
1810             gsub(/$/, "\x1b[0m")
1811         }
1812 
1813         {
1814             gsub(/ \? /, "\x1b[38;2;135;135;175m&\x1b[0m")
1815             gsub(/0[:\.]00*/, "\x1b[38;2;135;135;175m&\x1b[0m")
1816             printf "%3d  %s\n", NR - 1, $0
1817         }
1818     ' | nn --gray | less -JMKiCRS
1819 }
1820 
1821 # Nice Size, using my scripts `nn` and `cext`
1822 ns() { wc -c "$@" | nn --gray | cext; }
1823 
1824 # Nice Transform Json, using my scripts `tj`, and `nj`
1825 ntj() { tj "$@" | nj; }
1826 
1827 # Nice TimeStamp
1828 nts() {
1829     ts '%Y-%m-%d %H:%M:%S' |
1830         sed -u 's-^-\x1b[48;2;218;218;218m\x1b[38;2;0;95;153m-; s- -\x1b[0m\t-2'
1831 }
1832 
1833 # emit nothing to output and/or discard everything from input
1834 null() {
1835     if [ $# -gt 0 ]; then
1836         "$@" > /dev/null
1837     else
1838         cat < /dev/null
1839     fi
1840 }
1841 
1842 # NULl-terminate LINES ends each stdin line with a null byte, instead of a
1843 # line-feed byte
1844 nullines() {
1845     awk -v ORS='\000' '
1846         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1847         { gsub(/\r$/, ""); print; fflush() }
1848     ' "$@"
1849 }
1850 
1851 # (Nice) What Are These (?) shows what the names given to it are/do, coloring
1852 # the syntax of shell functions
1853 nwat() {
1854     local a
1855     local gap=0
1856 
1857     if [ $# -eq 0 ]; then
1858         printf "\e[38;2;204;0;0mnwat: no names given\e[0m\n" > /dev/stderr
1859         return 1
1860     fi
1861 
1862     local cmd="bat"
1863     # debian linux uses a different name for the `bat` app
1864     if [ -e "/usr/bin/batcat" ]; then
1865         cmd="batcat"
1866     fi
1867 
1868     for a in "$@"; do
1869         [ "${gap}" -gt 0 ] && printf "\n"
1870         gap=1
1871         # printf "\e[7m%-80s\e[0m\n" "$a"
1872         printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
1873 
1874         # resolve 1 alias level
1875         if alias "$a" 2> /dev/null > /dev/null; then
1876             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
1877         fi
1878 
1879         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
1880             # resolved aliases with args/spaces in them would otherwise fail
1881             echo "$a"
1882         elif whence -f "$a" > /dev/null 2> /dev/null; then
1883             # zsh seems to show a shell function's code only via `whence -f`
1884             whence -f "$a"
1885         elif type "$a" > /dev/null 2> /dev/null; then
1886             # dash doesn't support `declare`, and `type` in bash emits
1887             # a redundant first output line, when it's a shell function
1888             type "$a" | awk '
1889                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
1890                 { print; fflush() }
1891                 END { if (NR < 2 && skipped) print skipped }
1892             ' | "$cmd" -l sh --style=plain --theme='Monokai Extended Light' \
1893                 --wrap=never --color=always |
1894                     sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g'
1895         else
1896             printf "\e[38;2;204;0;0m%s not found\e[0m\n" "$a"
1897         fi
1898     done | less -JMKiCRS
1899 }
1900 
1901 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1902 # alternating styles to make long numbers easier to read
1903 # nwc() { wc "$@" | nn --gray; }
1904 
1905 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1906 # alternating styles to make long numbers easier to read
1907 # nwc() { wc "$@" | nn --gray | awk '{ printf "%5d %s\n", NR, $0; fflush() }'; }
1908 
1909 # Nice Word-Count runs `wc` and colors results, using my scripts `nn` and
1910 # `cext`, alternating styles to make long numbers easier to read
1911 nwc() {
1912     wc "$@" | sort -rn | nn --gray | cext |
1913         awk '{ printf "%5d %s\n", NR - 1, $0; fflush() }'
1914 }
1915 
1916 # Nice Weather Forecast
1917 nwf() {
1918     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
1919     curl --show-error -s telnet://graph.no:79 |
1920     sed -E \
1921         -e 's/ *\r?$//' \
1922         -e '/^\[/d' \
1923         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
1924         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
1925         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
1926         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
1927         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
1928         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
1929         -e 's/\*/○/g' |
1930     awk 1 |
1931     less -JMKiCRS
1932 }
1933 
1934 # Nice Zoom Json, using my scripts `zj`, and `nj`
1935 nzj() { zj "$@" | nj; }
1936 
1937 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1938 # pawk() { awk -F='' -v RS='' "$@"; }
1939 
1940 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1941 pawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
1942 
1943 # Plain `fd`
1944 pfd() { fd --color=never "$@"; }
1945 
1946 # pick lines, using all the 1-based line-numbers given
1947 picklines() {
1948     awk '
1949         BEGIN { m = ARGC - 1; if (ARGC == 1) exit 0 }
1950         BEGIN { for (i = 1; i <= m; i++) { p[i] = ARGV[i]; delete ARGV[i] } }
1951         { l[++n] = $0 }
1952         END {
1953             for (i = 1; i <= m; i++) {
1954                 j = p[i]
1955                 if (j < 0) j += NR + 1
1956                 if (0 < j && j <= NR) print l[j]
1957             }
1958         }
1959     ' "$@"
1960 }
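# usage sketch for `picklines`, with assumed sample input: line numbers can be
# given in any order, and negative ones count from the end, so the call below
# emits "c", "a", and "d"
# printf "a\nb\nc\nd\n" | picklines 3 1 -1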
1961 
1962 # Plain Interactive Grep
1963 pig() { ugrep --color=never -Q -E "$@"; }
1964 
1965 # make text plain, by ignoring ANSI terminal styling
1966 plain() {
1967     awk '
1968         {
1969             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
1970             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
1971             print; fflush()
1972         }
1973     ' "$@"
1974 }
1975 
1976 # end all lines with an ANSI-code to reset styles
1977 plainend() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1978 
1979 # end all lines with an ANSI-code to reset styles
1980 plainends() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1981 
1982 # play audio/video media
1983 # play() { mplayer -msglevel all=-1 "${@:--}"; }
1984 
1985 # play audio/video media
1986 play() { mpv "${@:--}"; }
1987 
1988 # Pick LINE, using the 1-based line-number given
1989 pline() {
1990     local line="$1"
1991     [ $# -gt 0 ] && shift
1992     awk -v n="${line}" '
1993         BEGIN { if (n < 1) exit 0 }
1994         NR == n { print; exit 0 }
1995     ' "$@"
1996 }
1997 
1998 # Paused MPV; especially useful when trying to view pictures via `mpv`
1999 pmpv() { mpv --pause "${@:--}"; }
2000 
2001 # Print Python result
2002 pp() { python -c "print($1)"; }
2003 
2004 # PRecede (input) ECHO, prepends a first line to stdin lines
2005 precho() { echo "$@" && cat /dev/stdin; }
2006 
2007 # PREcede (input) MEMO, prepends a first highlighted line to stdin lines
2008 prememo() {
2009     awk '
2010         BEGIN {
2011             if (ARGC > 1) printf "\x1b[7m"
2012             for (i = 1; i < ARGC; i++) {
2013                 if (i > 1) printf " "
2014                 printf "%s", ARGV[i]
2015                 delete ARGV[i]
2016             }
2017             if (ARGC > 1) printf "\x1b[0m\n"
2018             fflush()
2019         }
2020         { print; fflush() }
2021     ' "$@"
2022 }
2023 
2024 # start by joining all arguments given as a tab-separated-items line of output,
2025 # followed by all lines from stdin verbatim
2026 pretsv() {
2027     awk '
2028         BEGIN {
2029             for (i = 1; i < ARGC; i++) {
2030                 if (i > 1) printf "\t"
2031                 printf "%s", ARGV[i]
2032                 delete ARGV[i]
2033             }
2034             if (ARGC > 1) printf "\n"
2035             fflush()
2036         }
2037         { print; fflush() }
2038     ' "$@"
2039 }
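# usage sketch for `pretsv`, with assumed sample data: the call below emits a
# "name\tsize" header line, followed by the stdin line verbatim
# printf "notes.txt\t1234\n" | pretsv name size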
2040 
2041 # Plain Recursive Interactive Grep
2042 prig() { ugrep --color=never -r -Q -E "$@"; }
2043 
2044 # show/list all current processes
2045 processes() {
2046     local res
2047     res="$(ps aux)"
2048     echo "${res}" | awk '!/ps aux$/' | sed -E \
2049         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' \
2050         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1'
2051 }
2052 
2053 # Play Youtube Audio
2054 pya() {
2055     local url
2056     # some youtube URIs end with extra playlist/tracker parameters
2057     url="$(echo "$1" | sed 's-&.*--')"
2058     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
2059 }
2060 
2061 # Quiet ignores stderr, without any ugly keyboard-dancing
2062 q() { "$@" 2> /dev/null; }
2063 
2064 # Quiet MPV
2065 qmpv() { mpv --quiet "${@:--}"; }
2066 
2067 # ignore stderr, without any ugly keyboard-dancing
2068 quiet() { "$@" 2> /dev/null; }
2069 
2070 # Reset the screen, which empties it and resets the current style
2071 r() { reset; }
2072 
2073 # keep only lines between the 2 line numbers given, inclusively
2074 rangelines() {
2075     { [ "$#" -eq 2 ] || [ "$#" -eq 3 ]; } && [ "${1}" -le "${2}" ] &&
2076         { tail -n +"${1:-1}" "${3:--}" | head -n "$(("${2}" - "${1}" + 1))"; }
2077 }
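# usage sketch for `rangelines`, with assumed sample input: keeping lines 2
# thru 4 of the 5 lines below emits "b", "c", and "d"
# printf "a\nb\nc\nd\ne\n" | rangelines 2 4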
2078 
2079 # RANdom MANual page
2080 ranman() {
2081     find "/usr/share/man/man${1:-1}" -type f | shuf -n 1 | xargs basename |
2082         sed 's-\.gz$--' | xargs man
2083 }
2084 
2085 # Run AWK expression
2086 rawk() {
2087     local expr="${1:-0}"
2088     [ $# -gt 0 ] && shift
2089     awk "BEGIN { print ${expr}; exit }" "$@"
2090 }
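# usage sketches for `rawk`: the expressions below emit 1024 and 1.41421
# rawk '2 ^ 10'
# rawk 'sqrt(2)'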
2091 
2092 # play a ready-phone-line sound lasting the number of seconds given, or for 1
2093 # second by default; uses my script `waveout`
2094 ready() {
2095     local f='0.5 * sin(350*tau*t) + 0.5 * sin(450*tau*t)'
2096     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2097 }
2098 
2099 # reflow/trim lines of prose (text) to improve its legibility: it's especially
2100 # useful when the text is pasted from web-pages being viewed in reader mode
2101 reprose() {
2102     local w="${1:-80}"
2103     [ $# -gt 0 ] && shift
2104     awk '
2105         FNR == 1 && NR > 1 { print "" }
2106         { gsub(/\r$/, ""); print; fflush() }
2107     ' "$@" | fold -s -w "$w" | sed -u -E 's- *\r?$--'
2108 }
2109 
2110 # ignore ANSI styles from stdin and restyle things using the style-name given;
2111 # uses my script `style`
2112 restyle() { style "$@"; }
2113 
2114 # change the tab-title on your terminal app
2115 retitle() { printf "\e]0;%s\a\n" "$*"; }
2116 
2117 # REVerse-order SIZE (byte-count)
2118 revsize() { wc -c "$@" | sort -rn; }
2119 
2120 # Run In Folder
2121 rif() {
2122     local code
2123     pushd "${1:-.}" > /dev/null || return
2124     [ $# -gt 0 ] && shift
2125     "$@"
2126     code=$?
2127     popd > /dev/null || return "${code}"
2128     return "${code}"
2129 }
2130 
2131 # play a ringtone-style sound lasting the number of seconds given, or for 1
2132 # second by default; uses my script `waveout`
2133 ringtone() {
2134     local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))'
2135     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2136 }
2137 
2138 # Read-Only Editor
2139 roe() { micro -readonly true "$@"; }
2140 
2141 # Read-Only Micro (text editor)
2142 rom() { micro -readonly true "$@"; }
2143 
2144 # run the command given, trying to turn its output into TSV (tab-separated
2145 # values); uses my script `dejson`
2146 rtab() { jc "$@" | dejson; }
2147 
2148 # Right TRIM ignores trailing spaces, as well as trailing carriage returns
2149 rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2150 
2151 # show a RULER-like width-measuring line
2152 # ruler() {
2153 #     local n="${1:-$(tput cols)}"
2154 #     [ "${n}" -gt 0 ] && printf "%${n}s\n" "" |
2155 #         sed -E 's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2156 # }
2157 
2158 # show a RULER-like width-measuring line
2159 ruler() {
2160     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \
2161         's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2162 }
2163 
2164 # run the command given, trying to turn its output into TSV (tab-separated
2165 # values); uses my script `dejson`
2166 runtab() { jc "$@" | dejson; }
2167 
2168 # run the command given, trying to turn its output into TSV (tab-separated
2169 # values); uses my script `dejson`
2170 runtsv() { jc "$@" | dejson; }
2171 
2172 # Reverse-order WC
2173 rwc() { wc "$@" | sort -rn; }
2174 
2175 # extended-mode Sed, enabling its full regex syntax
2176 # s() { sed -E -u "$@"; }
2177 
2178 # Substitute using `sed`, enabling its full regex syntax
2179 s() { sed -E -u "$(printf "s\xff$1\xff$2\xffg")"; }
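# usage sketch for `s`: since the separator is a \xff byte, slashes need no
# escaping in either the regex or its replacement; the call below emits /opt/bin
# echo '/usr/bin' | s '^/usr' '/opt'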
2180 
2181 # Silent CURL spares you the progress bar, but still tells you about errors
2182 scurl() { curl --show-error -s "$@"; }
2183 
2184 # show a unique-looking SEParator line; useful to run between commands
2185 # which output walls of text
2186 sep() {
2187     [ "${1:-80}" -gt 0 ] &&
2188         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g'
2189 }
2190 
2191 # webSERVE files in a folder as localhost, using the port number given, or
2192 # port 8080 by default
2193 serve() {
2194     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
2195     python3 -m http.server "${1:-8080}" -d "${2:-.}"
2196 }
2197 
2198 # SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input
2199 setdiff() {
2200     # comm -23 <(sort "$1") <(sort "$2")
2201     # dash doesn't support the process-sub syntax
2202     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2203 }
2204 
2205 # SET INtersection sorts its 2 inputs, then finds common lines
2206 setin() {
2207     # comm -12 <(sort "$1") <(sort "$2")
2208     # dash doesn't support the process-sub syntax
2209     (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2210 }
2211 
2212 # SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input
2213 setsub() {
2214     # comm -23 <(sort "$1") <(sort "$2")
2215     # dash doesn't support the process-sub syntax
2216     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2217 }
2218 
2219 # Show Files (and folders), coloring folders and links; uses my script `nn`
2220 sf() {
2221     ls -al --file-type --color=never --time-style iso "$@" | awk '
2222         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2223         {
2224             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2225             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2226             printf "%6d  %s\n", NR - 1, $0; fflush()
2227         }
2228     ' | nn --gray | less -JMKiCRS
2229 }
2230 
2231 # Show Files (and folders) Plus, by coloring folders, links, and extensions;
2232 # uses my scripts `nn` and `cext`
2233 sfp() {
2234     ls -al --file-type --color=never --time-style iso "$@" | awk '
2235         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2236         {
2237             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2238             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2239             printf "%6d  %s\n", NR - 1, $0; fflush()
2240         }
2241     ' | nn --gray | cext | less -JMKiCRS
2242 }
2243 
2244 # Show File Sizes, using my scripts `nn` and `cext`
2245 sfs() {
2246     # turn arg-list into single-item lines
2247     printf "%s\x00" "$@" |
2248     # calculate file-sizes, and reverse-sort results
2249     xargs -0 wc -c | sort -rn |
2250     # add/realign fields to improve legibility
2251     awk '
2252         # start output with a header-like line, and add a MiB field
2253         BEGIN { printf "%6s  %10s  %8s  name\n", "n", "bytes", "MiB"; fflush() }
2254         # make table breathe with empty lines, so tall outputs are readable
2255         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2256         # emit regular output lines
2257         {
2258             printf "%6d  %10d  %8.2f  ", NR - 1, $1, $1 / 1048576
2259             # first field is likely space-padded
2260             gsub(/^ */, "")
2261             # slice line after the first field, as filepaths can have spaces
2262             $0 = substr($0, length($1) + 1)
2263             # first field is likely space-padded
2264             gsub(/^ /, "")
2265             printf "%s\n", $0; fflush()
2266         }
2267     ' |
2268     # make zeros in the MiB field stand out with a special color
2269     awk '
2270         {
2271             gsub(/ 00*\.00* /, "\x1b[38;2;135;135;175m&\x1b[0m")
2272             print; fflush()
2273         }
2274     ' |
2275     # make numbers nice, alternating styles along 3-digit groups
2276     nn --gray |
2277     # color-code file extensions
2278     cext |
2279     # make result interactively browsable
2280     less -JMKiCRS
2281 }
2282 
2283 # SHell-run AWK output
2284 # shawk() { stdbuf -oL awk "$@" | sh; }
2285 
2286 # time/benchmark various commands given one-per-line from stdin, appending to
2287 # each any common extra arguments given explicitly; uses `hyperfine`
2288 showdown() {
2289     awk '
2290         BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } }
2291         {
2292             printf "%s", $0
2293             for (i = 1; i < ARGC; i++) printf " %s", a[i]
2294             printf "\x00"; fflush()
2295         }
2296     ' "$@" | xargs -0 hyperfine --style full
2297 }
2298 
2299 # SHOW a command, then RUN it
2300 showrun() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2301 
2302 # SHell-QUOTE each line from the input(s): this is useful to make lines each
2303 # holding a single filepath compatible with `xargs`, since standard shell
2304 # settings get in the way of filepaths with spaces and other special symbols
2305 shquote() {
2306     awk '
2307         {
2308             s = $0
2309             gsub(/\r$/, "", s)
2310             gsub(/\\/, "\\\\", s)
2311             gsub(/"/, "\\\"", s)
2312             gsub(/`/, "\\`", s)
2313             gsub(/\$/, "\\$", s)
2314             printf "\"%s\"\n", s; fflush()
2315         }
2316     ' "$@"
2317 }
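# usage sketch for `shquote`, with an assumed awkward filename: the call below
# emits "weird file's name.txt", double-quotes included
# printf "weird file's name.txt\n" | shquote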
2318 
2319 # clean the screen, after running the command given
2320 # sideshow() { tput smcup; "$@"; tput rmcup; }
2321 
2322 # skip the first n lines, or the 1st line by default
2323 skip() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2324 
2325 # skip the first n bytes
2326 skipbytes() { tail -c +$(("$1" + 1)) "${2:--}"; }
2327 
2328 # skip the last n lines, or the last line by default
2329 skiplast() { head -n -"${1:-1}" "${2:--}"; }
2330 
2331 # skip the last n bytes
2332 skiplastbytes() { head -c -"$1" "${2:--}"; }
2333 
2334 # skip the last n lines, or the last line by default
2335 skiplastlines() { head -n -"${1:-1}" "${2:--}"; }
2336 
2337 # skip the first n lines, or the 1st line by default
2338 skiplines() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2339 
2340 # SLOW/delay lines from the standard-input, waiting the number of seconds
2341 # given for each line, or waiting 1 second by default
2342 slow() {
2343     local seconds="${1:-1}"
2344     (
2345         IFS="$(printf "\n")"
2346         while read -r line; do
2347             sleep "${seconds}"
2348             printf "%s\n" "${line}"
2349         done
2350     )
2351 }
2352 
2353 # Show Latest Podcasts, using my scripts `podfeed` and `si`
2354 slp() {
2355     local title
2356     title="Latest Podcast Episodes as of $(date +'%F %T')"
2357     podfeed -title "${title}" "$@" | si
2358 }
2359 
2360 # recursively find all files with fewer bytes than the number given
2361 smallfiles() {
2362     local n
2363     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
2364     [ $# -gt 0 ] && shift
2365 
2366     local arg
2367     for arg in "${@:-.}"; do
2368         if [ ! -d "${arg}" ]; then
2369             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2370             return 1
2371         fi
2372         stdbuf -oL find "${arg}" -type f -size -"$n"c
2373     done
2374 }
2375 
2376 # emit the first line as is, sorting all lines after that, using the
2377 # `sort` command, passing all/any arguments/options to it
2378 sortrest() {
2379     awk -v sort="sort $*" '
2380         { gsub(/\r$/, "") }
2381         NR == 1 { print; fflush() }
2382         NR > 1 { print | sort }
2383     '
2384 }
2385 
2386 # SORt Tab-Separated Values: emit the first line as is, sorting all lines after
2387 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2388 # all/any arguments/options to it
2389 sortsv() {
2390     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2391         { gsub(/\r$/, "") }
2392         NR == 1 { print; fflush() }
2393         NR > 1 { print | sort }
2394     '
2395 }
2396 
2397 # emit a line with the number of spaces given in it
2398 spaces() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" ""; }
2399 
2400 # ignore leading spaces, trailing spaces, even runs of multiple spaces
2401 # in the middle of lines, as well as trailing carriage returns
2402 squeeze() {
2403     awk '
2404         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2405         {
2406             gsub(/^ +| *\r?$/, "")
2407             gsub(/ *\t */, "\t")
2408             gsub(/  +/, " ")
2409             print; fflush()
2410         }
2411     ' "$@"
2412 }
2413 
2414 # SQUeeze and stOMP, by ignoring leading spaces, trailing spaces, even runs
2415 # of multiple spaces in the middle of lines, as well as trailing carriage
2416 # returns, while also turning runs of empty lines into single empty lines,
2417 # and ignoring leading/trailing empty lines, effectively also `squeezing`
2418 # lines vertically
2419 squomp() {
2420     awk '
2421         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2422         /^\r?$/ { empty = 1; next }
2423         empty { if (n > 0) print ""; empty = 0 }
2424         {
2425             gsub(/^ +| *\r?$/, "")
2426             gsub(/ *\t */, "\t")
2427             gsub(/  +/, " ")
2428             print; fflush()
2429             n++
2430         }
2431     ' "$@"
2432 }
2433 
2434 # Show a command, then Run it
2435 sr() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2436 
2437 # turn runs of empty lines into single empty lines, effectively squeezing
2438 # paragraphs vertically, so to speak; runs of empty lines both at the start
2439 # and at the end are ignored
2440 stomp() {
2441     awk '
2442         /^\r?$/ { empty = 1; next }
2443         empty { if (n > 0) print ""; empty = 0 }
2444         { print; fflush(); n++ }
2445     ' "$@"
2446 }
2447 
2448 # STRike-thru (lines) with AWK
2449 strawk() {
2450     local cond="${1:-1}"
2451     [ $# -gt 0 ] && shift
2452     awk '
2453         { low = lower = tolower($0) }
2454         '"${cond}"' {
2455             gsub(/\x1b\[0m/, "\x1b[0m\x1b[9m")
2456             printf "\x1b[9m%s\x1b[0m\n", $0; fflush()
2457             next
2458         }
2459         { print; fflush() }
2460     ' "$@"
2461 }
2462 
2463 # Sort Tab-Separated Values: emit the first line as is, sorting all lines after
2464 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2465 # all/any arguments/options to it
2466 stsv() {
2467     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2468         { gsub(/\r$/, "") }
2469         NR == 1 { print; fflush() }
2470         NR > 1 { print | sort }
2471     '
2472 }
2473 
2474 # use the result of the `awk` function `substr` for each line
2475 substr() {
2476     local start="${1:-1}"
2477     local length="${2:-80}"
2478     [ $# -gt 0 ] && shift
2479     [ $# -gt 0 ] && shift
2480     awk -v start="${start}" -v len="${length}" \
2481         '{ printf "%s\n", substr($0, start, len); fflush() }' "$@"
2482 }
2483 
2484 # turn SUDo privileges OFF right away: arguments also cause `sudo` to run with
2485 # what's given, before relinquishing existing privileges
2486 # sudoff() {
2487 #     local code=0
2488 #     if [ $# -gt 0 ]; then
2489 #         sudo "$@"
2490 #         code=$?
2491 #     fi
2492 #     sudo -k
2493 #     return "${code}"
2494 # }
2495 
2496 # append a final Tab-Separated-Values line with the sums of all columns from
2497 # the input table(s) given; items from first lines aren't counted/added
2498 sumtsv() {
2499     awk -F "\t" '
2500         {
2501             print; fflush()
2502             if (width < NF) width = NF
2503         }
2504 
2505         FNR > 1 { for (i = 1; i <= NF; i++) sums[i] += $i + 0 }
2506 
2507         END {
2508             for (i = 1; i <= width; i++) {
2509                 if (i > 1) printf "\t"
2510                 printf "%s", sums[i] ""
2511             }
2512             if (width > 0) printf "\n"
2513         }
2514     ' "$@"
2515 }
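# usage sketch for `sumtsv`, with assumed sample data: the 2 data rows below
# get a final "4\t6" sums row appended after them, skipping the header row
# printf "a\tb\n1\t2\n3\t4\n" | sumtsv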
2516 
2517 # show a random command defined in `clam`, using `wat` from `clam` itself
2518 # surprise() {
2519 #     local p="$(which clam)"
2520 #     wat "$(grep -E '^[a-z]+\(' "$p" | shuf -n 1 | sed -E 's-\(.*--')"
2521 # }
2522 
2523 # Time the command given
2524 t() { /usr/bin/time "$@"; }
2525 
2526 # show a reverse-sorted tally of all lines read, where ties are sorted
2527 # alphabetically
2528 tally() {
2529     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
2530         # reassure users by instantly showing the header
2531         BEGIN { print "value\ttally"; fflush() }
2532         { gsub(/\r$/, ""); t[$0]++ }
2533         END { for (k in t) { printf("%s\t%d\n", k, t[k]) | sort } }
2534     ' "$@"
2535 }
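# usage sketch for `tally`, with assumed sample input: the call below emits
# the header line, then "a\t3" and "b\t1"
# printf "a\nb\na\na\n" | tally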
2536 
2537 # Tab AWK: TSV-specific I/O settings for `awk`
2538 # tawk() { awk -F "\t" -v OFS="\t" "$@"; }
2539 
2540 # Tab AWK: TSV-specific I/O settings for `awk`
2541 tawk() { stdbuf -oL awk -F "\t" -v OFS="\t" "$@"; }
2542 
2543 # quick alias for my script `tbp`
2544 tb() { tbp "$@"; }
2545 
2546 # Titled conCATenate Lines highlights each filename, before emitting its
2547 # lines
2548 tcatl() {
2549     awk '
2550         FNR == 1 { printf "\x1b[7m%s\x1b[0m\n", FILENAME; fflush() }
2551         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2552         { gsub(/\r$/, ""); print; fflush() }
2553     ' "$@"
2554 }
2555 
2556 # Title ECHO changes the tab-title on your terminal app
2557 techo() { printf "\e]0;%s\a\n" "$*"; }
2558 
2559 # simulate the cadence of old-fashioned teletype machines, by slowing down
2560 # the output of ASCII/UTF-8 symbols from the standard-input
2561 # teletype() {
2562 #     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | (
2563 #         IFS="$(printf "\n")"
2564 #         while read -r line; do
2565 #             echo "${line}" | sed -E 's-(.)-\1\n-g' |
2566 #                 while read -r item; do
2567 #                     sleep 0.015
2568 #                     printf "%s" "${item}"
2569 #                 done
2570 #             sleep 0.75
2571 #             printf "\n"
2572 #         done
2573 #     )
2574 # }
2575 
2576 # simulate the cadence of old-fashioned teletype machines, by slowing down
2577 # the output of ASCII/UTF-8 symbols from the standard-input
2578 teletype() {
2579     awk '
2580         {
2581             gsub(/\r$/, "")
2582 
2583             n = length($0)
2584             for (i = 1; i <= n; i++) {
2585                 if (code = system("sleep 0.015")) exit code
2586                 printf "%s", substr($0, i, 1); fflush()
2587             }
2588             if (code = system("sleep 0.75")) exit code
2589             printf "\n"; fflush()
2590         }
2591     ' "$@"
2592 }
2593 
2594 # run `top` without showing any of its output after quitting it
2595 tip() { tput smcup; top "$@"; tput rmcup; }
2596 
2597 # change the tab-title on your terminal app
2598 title() { printf "\e]0;%s\a\n" "$*"; }
2599 
2600 # quick alias for my script `tjp`
2601 tj() { tjp "$@"; }
2602 
2603 # quick alias for my script `tlp`
2604 tl() { tlp "$@"; }
2605 
2606 # show the current date in a specific format
2607 today() { date +'%Y-%m-%d %a %b %d'; }
2608 
2609 # get the first n lines, or 1 by default
2610 toline() { head -n "${1:-1}" "${2:--}"; }
2611 
2612 # lowercase all ASCII symbols
2613 tolower() { awk '{ print tolower($0); fflush() }' "$@"; }
2614 
2615 # play a tone/sine-wave sound lasting the number of seconds given, or for 1
2616 # second by default: after the optional duration, the next optional arguments
2617 # are the volume and the tone-frequency; uses my script `waveout`
2618 tone() {
2619     waveout "${1:-1}" "${2:-1} * sin(${3:-440} * 2 * pi * t)" |
2620         mpv --really-quiet -
2621 }
2622 
2623 # get the processes currently using the most cpu
2624 topcpu() {
2625     local n="${1:-10}"
2626     [ "$n" -gt 0 ] && ps aux | awk '
2627         NR == 1 { print; fflush() }
2628         NR > 1 { print | "sort -rnk3" }
2629     ' | head -n "$(("$n" + 1))"
2630 }
2631 
2632 # show all files directly in the folder given, without looking any deeper
2633 topfiles() {
2634     local arg
2635     for arg in "${@:-.}"; do
2636         if [ ! -d "${arg}" ]; then
2637             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2638             return 1
2639         fi
2640         stdbuf -oL find "${arg}" -maxdepth 1 -type f
2641     done
2642 }
2643 
2644 # show all folders directly in the folder given, without looking any deeper
2645 topfolders() {
2646     local arg
2647     for arg in "${@:-.}"; do
2648         if [ ! -d "${arg}" ]; then
2649             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2650             return 1
2651         fi
2652         stdbuf -oL find "${arg}" -maxdepth 1 -type d |
2653             awk '!/^\.$/ { print; fflush() }'
2654     done
2655 }
2656 
2657 # get the processes currently using the most memory
2658 topmemory() {
2659     local n="${1:-10}"
2660     [ "$n" -gt 0 ] && ps aux | awk '
2661         NR == 1 { print; fflush() }
2662         NR > 1 { print | "sort -rnk6" }
2663     ' | head -n "$(("$n" + 1))"
2664 }
2665 
2666 # transpose (switch) rows and columns from tables
2667 transpose() {
2668     awk '
2669         { gsub(/\r$/, "") }
2670 
2671         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
2672 
2673         {
2674             for (i = 1; i <= NF; i++) lines[i][NR] = $i
2675             if (maxitems < NF) maxitems = NF
2676         }
2677 
2678         END {
2679             for (j = 1; j <= maxitems; j++) {
2680                 for (i = 1; i <= NR; i++) {
2681                     if (i > 1) printf "\t"
2682                     printf "%s", lines[j][i]
2683                 }
2684                 printf "\n"
2685             }
2686         }
2687     ' "$@"
2688 }
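# usage sketch for `transpose`, with assumed sample data: the 2x3 tab-separated
# table below comes back as a 3x2 one, with rows "1\t4", "2\t5", and "3\t6"
# printf "1\t2\t3\n4\t5\t6\n" | transpose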
2689 
2690 # ignore leading/trailing spaces, as well as trailing carriage returns
2691 trim() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2692 
2693 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2694 # decimal dots themselves, when decimals in a number are all zeros; works
2695 # on gawk and busybox awk, but not on mawk, as the latter lacks `gensub`
2696 # trimdecs() {
2697 #     awk '
2698 #         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2699 #         {
2700 #             gsub(/\r$/, "")
2701 #             $0 = gensub(/([0-9]+)\.0+/, "\\1", "g")
2702 #             $0 = gensub(/([0-9]+\.[0-9]*[1-9]+)0+/, "\\1", "g")
2703 #             print; fflush()
2704 #         }
2705 #     ' "$@"
2706 # }
2707 
2708 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2709 # decimal dots themselves, when decimals in a number are all zeros
2710 trimdecs() {
2711     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" |
2712         sed -u -E 's-([0-9]+)\.0+-\1-g; s-([0-9]+\.[0-9]*[1-9]+)0+-\1-g'
2713 }
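# usage sketch for `trimdecs`: the call below turns "3.1400" into "3.14", and
# "5.000" into just "5"
# printf "3.1400\n5.000\n" | trimdecs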
2714 
2715 # ignore trailing spaces, as well as trailing carriage returns
2716 trimend() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2717 
2718 # ignore trailing spaces, as well as trailing carriage returns
2719 trimends() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2720 
2721 # ignore leading/trailing spaces, as well as trailing carriage returns
2722 trimlines() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2723 
2724 # ignore leading/trailing spaces, as well as trailing carriage returns
2725 trimsides() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2726 
2727 # ignore trailing spaces, as well as trailing carriage returns
2728 trimtrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2729 
2730 # ignore trailing spaces, as well as trailing carriage returns
2731 trimtrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2732 
2733 # try running a command, emitting an explicit message to standard-error
2734 # if the command given fails
2735 try() {
2736     "$@" || {
2737         printf "\n\e[31m%s \e[41m\e[97m failed \e[0m\n" "$*" >&2
2738         return 255
2739     }
2740 }
2741 
2742 # Transform Strings with Python; uses my script `tbp`
2743 tsp() { tbp -s "$@"; }
2744 
2745 # run the command given, trying to turn its output into TSV (tab-separated
2746 # values); uses my script `dejson`
2747 tsvrun() { jc "$@" | dejson; }
2748 
2749 # Underline (lines) with AWK
2750 uawk() {
2751     local cond="${1:-1}"
2752     [ $# -gt 0 ] && shift
2753     awk '
2754         { low = lower = tolower($0) }
2755         '"${cond}"' {
2756             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2757             printf "\x1b[4m%s\x1b[0m\n", $0; fflush()
2758             next
2759         }
2760         { print; fflush() }
2761     ' "$@"
2762 }
2763 
2764 # Underline Every few lines: make groups of 5 lines (by default) stand out by
2765 # underlining the last line of each
2766 ue() {
2767     local n="${1:-5}"
2768     [ $# -gt 0 ] && shift
2769     awk -v n="$n" '
2770         BEGIN { if (n == 0) n = -1 }
2771         NR % n == 0 && NR != 1 {
2772             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2773             printf("\x1b[4m%s\x1b[0m\n", $0); fflush()
2774             next
2775         }
2776         { print; fflush() }
2777     ' "$@"
2778 }
2779 
2780 # deduplicate lines, keeping them in their original order
2781 unique() { awk '!c[$0]++ { print; fflush() }' "$@"; }
2782 
2783 # concatenate all named input sources unix-style: all trailing CRLFs become
2784 # single LFs, each non-empty input will always end in a LF, so lines from
2785 # different sources are never accidentally joined; also leading UTF-8 BOMs
2786 # on the first line of each input are ignored, as those are useless at best
2787 unixify() {
2788     awk '
2789         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2790         { gsub(/\r$/, ""); print; fflush() }
2791     ' "$@"
2792 }
2793 
2794 # go UP n folders, or go up 1 folder by default
2795 up() {
2796     if [ "${1:-1}" -le 0 ]; then
2797         cd .
2798         return $?
2799     fi
2800 
2801     cd "$(printf "%${1:-1}s" "" | sed 's- -../-g')" || return $?
2802 }
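# usage sketch for `up`: the call below builds the relative path "../../" and
# changes into it, the same as running `cd ../../`
# up 2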
2803 
2804 # convert United States Dollars into CAnadian Dollars, using the latest
2805 # official exchange rates from the bank of canada; during weekends, the
2806 # latest rate may be from a few days ago; the default amount of usd to
2807 # convert is 1, when not given
2808 usd2cad() {
2809     local site='https://www.bankofcanada.ca/valet/observations/group'
2810     local csv_rates="${site}/FX_RATES_DAILY/csv"
2811     local url
2812     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
2813     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
2814         /USD/ { for (i = 1; i <= NF; i++) if($i ~ /USD/) j = i }
2815         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
2816 }
2817 
2818 # View with `less`
2819 v() { less -JMKiCRS "$@"; }
2820 
2821 # run a command, showing its success/failure right after
2822 verdict() {
2823     local code
2824     "$@"
2825     code=$?
2826 
2827     if [ "${code}" -eq 0 ]; then
2828         printf "\n\e[38;2;0;135;95m%s \e[48;2;0;135;95m\e[38;2;255;255;255m succeeded \e[0m\n" "$*" >&2
2829     else
2830         printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" >&2
2831     fi
2832     return "${code}"
2833 }
2834 
2835 # run `cppcheck` with even stricter options
2836 vetc() { cppcheck --enable=portability --enable=style "$@"; }
2837 
2838 # run `cppcheck` with even stricter options
2839 vetcpp() { cppcheck --enable=portability --enable=style "$@"; }
2840 
2841 # check shell scripts for common gotchas, avoiding complaints about using
2842 # the `local` keyword, which is widely supported in practice
2843 vetshell() { shellcheck -e 3043 "$@"; }
2844 
2845 # View with Header runs `less` without line numbers, with ANSI styles, no
2846 # line-wraps, and using the first n lines as a sticky-header (1 by default),
2847 # so they always show on top
2848 vh() {
2849     local n="${1:-1}"
2850     [ $# -gt 0 ] && shift
2851     less --header="$n" -JMKiCRS "$@"
2852 }
2853 
2854 # VIEW the result of showing a command, then RUNning it, using `less`
2855 viewrun() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2856 
2857 # View Nice Columns; uses my scripts `realign` and `nn`
2858 vnc() { realign "$@" | nn --gray | less -JMKiCRS; }
2859 
2860 # View Nice Hexadecimals; uses my script `nh`
2861 vnh() { nh "$@" | less -JMKiCRS; }
2862 
2863 # View Nice Json / Very Nice Json; uses my scripts `nj` and `nn`
2864 vnj() { nj "$@" | less -JMKiCRS; }
2865 
2866 # View Very Nice Json with Nice Numbers; uses my scripts `nj` and `nn`
2867 vnjnn() { nj "$@" | nn --gray | less -JMKiCRS; }
2868 
2869 # View Nice Numbers; uses my script `nn`
2870 vnn() { nn "${@:---gray}" | less -JMKiCRS; }
2871 
2872 # View Nice Table / Very Nice Table; uses my scripts `nt` and `nn`
2873 vnt() {
2874     awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" |
2875         nt | nn --gray |
2876         awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' |
2877         less -JMKiCRS #--header=1
2878 }
2879 
2880 # View-Run using `less`: show a command, then run it
2881 # vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less --header=1 -JMKiCRS; }
2882 
2883 # View-Run using `less`: show a command, then run it
2884 vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2885 
2886 # View Text with `less`
2887 # vt() { less -JMKiCRS "$@"; }
2888 
2889 # View Text with the `micro` text-editor in read-only mode
2890 vt() { micro -readonly true "$@"; }
2891 
2892 # What are these (?); uses my command `nwat`
2893 # w() { nwat "$@"; }
2894 
2895 # What Are These (?) shows what the names given to it are/do
2896 wat() {
2897     local a
2898     local gap=0
2899 
2900     if [ $# -eq 0 ]; then
2901         printf "\e[31mwat: no names given\e[0m\n" > /dev/stderr
2902         return 1
2903     fi
2904 
2905     for a in "$@"; do
2906         [ "${gap}" -gt 0 ] && printf "\n"
2907         gap=1
2908         # printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
2909         printf "\e[7m%-80s\e[0m\n" "$a"
2910 
2911         # resolve 1 alias level
2912         if alias "$a" 2> /dev/null > /dev/null; then
2913             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
2914         fi
2915 
2916         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
2917             # resolved aliases with args/spaces in them would otherwise fail
2918             echo "$a"
2919         elif whence -f "$a" > /dev/null 2> /dev/null; then
2920             # zsh seems to show a shell function's code only via `whence -f`
2921             whence -f "$a"
2922         elif type "$a" > /dev/null 2> /dev/null; then
2923             # dash doesn't support `declare`, and `type` in bash emits
2924             # a redundant first output line, when it's a shell function
2925             type "$a" | awk '
2926                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
2927                 { print; fflush() }
2928                 END { if (NR < 2 && skipped) print skipped }
2929             '
2930         else
2931             printf "\e[31m%s not found\e[0m\n" "$a"
2932         fi
2933     done | less -JMKiCRS
2934 }
2935 
2936 # Word-Count TSV, runs the `wc` app using all stats, emitting tab-separated
2937 # lines instead
2938 wctsv() {
2939     printf "file\tbytes\tlines\tcharacters\twords\tlongest\n"
2940     stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \
2941         's-^ *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^\r]*)$-\6\t\4\t\1\t\3\t\2\t\5-' |
2942         awk '
2943             NR > 1 { print prev; fflush() }
2944             { prev = $0 }
2945             END { if (NR == 1 || !/^total\t/) print }
2946         '
2947 }
2948 
2949 # get weather forecasts, almost filling the terminal's current width
2950 # weather() {
2951 #     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
2952 #     curl --show-error -s telnet://graph.no:79 |
2953 #     sed -E \
2954 #         -e 's/ *\r?$//' \
2955 #         -e '/^\[/d' \
2956 #         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
2957 #         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
2958 #         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
2959 #         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
2960 #         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
2961 #         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
2962 #         -e 's/\*/○/g' |
2963 #     awk 1 |
2964 #     less -JMKiCRS
2965 # }
2966 
2967 # get weather forecasts; uses my script `nwf`
2968 weather() { nwf "$@"; }
2969 
2970 # Weather Forecast
2971 wf() {
2972     printf "%s\r\n\r\n" "$*" | curl --show-error -s telnet://graph.no:79 |
2973         awk '{ print; fflush() }' | less -JMKiCRS
2974 }
2975 
2976 # recursively find all files with trailing spaces/CRs
2977 wheretrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2978 
2979 # recursively find all files with trailing spaces/CRs
2980 whichtrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2981 
2982 # turn all WSL-style mounted paths detected (which start with /mnt/ followed
2983 # by a drive letter) into WINdows-style PATHS
2984 winpaths() {
2985     awk '{ print; fflush() }' "$@" |
2986         sed -u -E 's-(/mnt/([A-Za-z])(/))-\u\2:/-g'
2987 }
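# usage sketch for `winpaths`, with an assumed WSL-style path: the call below
# emits C:/Users/me/notes.txt
# echo '/mnt/c/Users/me/notes.txt' | winpaths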
2988 
2989 # run `xargs`, using whole lines as extra arguments
2990 # x() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
2991 
2992 # run `xargs`, using whole lines as extra arguments
2993 # x() {
2994 #     awk -v ORS='\000' '
2995 #         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2996 #         { gsub(/\r$/, ""); print; fflush() }
2997 #     ' | xargs -0 "$@"
2998 # }
2999 
3000 # run `xargs`, using zero/null bytes as the extra-arguments terminator
3001 x0() { xargs -0 "$@"; }
3002 
3003 # run `xargs`, using whole lines as extra arguments
3004 # xl() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
3005 
3006 # run `xargs`, using whole lines as extra arguments
3007 xl() {
3008     awk -v ORS='\000' '
3009         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
3010         { gsub(/\r$/, ""); print; fflush() }
3011     ' | xargs -0 "$@"
3012 }
3013 
3014 # Youtube Audio Player
3015 yap() {
3016     local url
3017     # some youtube URIs end with extra playlist/tracker parameters
3018     url="$(echo "$1" | sed 's-&.*--')"
3019     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
3020 }
3021 
3022 # show a calendar for the current YEAR, or for the year given
3023 year() {
3024     {
3025         # show the current date/time center-aligned
3026         printf "%20s\e[38;2;78;154;6m%s\e[0m  \e[38;2;52;101;164m%s\e[0m\n\n" \
3027             "" "$(date +'%a %b %d %Y')" "$(date +%T)"
3028         # debian linux has a different `cal` app which highlights the day
3029         if [ -e "/usr/bin/ncal" ]; then
3030             # fix debian/ncal's weird way to highlight the current day
3031             ncal -C -y "$@" | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
3032         else
3033             cal -y "$@"
3034         fi
3035     } | less -JMKiCRS
3036 }
3037 
3038 # show the current date in the YYYY-MM-DD format
3039 ymd() { date +'%Y-%m-%d'; }
3040 
3041 # YouTube Url
3042 ytu() {
3043     local url
3044     # some youtube URIs end with extra playlist/tracker parameters
3045     url="$(echo "$1" | sed 's-&.*--')"
3046     [ $# -gt 0 ] && shift
3047     yt-dlp "$@" --get-url "${url}"
3048 }
3049 
3050 # . <(
3051 #     find "$(dirname $(which clam))" -type f -print0 |
3052 #         xargs -0 -n 1 basename |
3053 #         awk '{ print "unset " $0; print "unalias " $0 }'
3054 # ) 2> /dev/null