File: clam.sh
   1 #!/bin/sh
   2 
   3 # The MIT License (MIT)
   4 #
   5 # Copyright © 2020-2025 pacman64
   6 #
   7 # Permission is hereby granted, free of charge, to any person obtaining a copy
   8 # of this software and associated documentation files (the “Software”), to deal
   9 # in the Software without restriction, including without limitation the rights
  10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11 # copies of the Software, and to permit persons to whom the Software is
  12 # furnished to do so, subject to the following conditions:
  13 #
  14 # The above copyright notice and this permission notice shall be included in
  15 # all copies or substantial portions of the Software.
  16 #
  17 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23 # SOFTWARE.
  24 
  25 
  26 # clam
  27 #
  28 # Command-Line Augmentation Module (clam): get the best out of your shell
  29 #
  30 #
  31 # This is a collection of arguably useful shell functions and shortcuts:
  32 # some of these extra commands can be real time/effort savers, ideally
  33 # letting you concentrate on getting things done.
  34 #
  35 # Some of these commands depend on my other scripts from `pac-tools`, while
  36 # others rely either on widely-preinstalled command-line apps, or on ones
  37 # available from most of the major command-line `package` managers.
  38 #
  39 # Among these commands, you'll notice a preference for lines whose items
  40 # are tab-separated rather than space-separated, and for unix-style lines,
  41 # which always end with a line-feed instead of a CRLF byte-pair. These
  42 # conventions make plain-text data-streams less ambiguous and easier to work
  43 # with, especially when passing them along pipes.
  44 #
  45 # To use this script, you're supposed to `source` it, so its definitions
  46 # stay for your whole shell session: for that, you can run `source clam` or
  47 # `. clam` (no quotes either way), either directly or at shell startup.
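     #
     # For example, assuming this script is saved as `~/clam` (the path here is
     # just an illustration), you could add this line to your shell's startup
     # script:
     #
     #     . ~/clam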
  48 #
  49 # This script is compatible with `bash`, `zsh`, and even `dash`, which is
  50 # debian linux's default non-interactive shell. Some of its commands even
  51 # seem to work on busybox's shell.
  52 
  53 
  54 # handle help options
  55 case "$1" in
  56     -h|--h|-help|--help)
  57         # show help message, using the info-comment from this very script
  58         awk '
  59             /^case / { exit }
  60             /^# +clam$/, /^$/ { gsub(/^# ?/, ""); print }
  61         ' "$0"
  62         exit 0
  63     ;;
  64 esac
  65 
  66 
  67 # dash doesn't support regex-matching syntax, so case statements are used here
  68 case "$0" in
  69     -bash|-dash|-sh|bash|dash|sh)
  70         # script is being sourced with bash or dash, which is good
  71         :
  72     ;;
  73     *)
  74         case "$ZSH_EVAL_CONTEXT" in
  75             *:file)
  76                 # script is being sourced with zsh, which is good
  77                 :
  78             ;;
  79             *)
  80                 # script is being run normally, which is a waste of time
  81 printf "\e[48;2;255;255;135m\e[30mDon't run this script, source it instead: to do that,\e[0m\n"
  82 printf "\e[48;2;255;255;135m\e[30mrun 'source clam' or '. clam' (no quotes either way).\e[0m\n"
  83                 # failing during shell-startup may deny shell access, so exit
  84                 # with a 0 error-code to declare success
  85                 exit 0
  86             ;;
  87         esac
  88     ;;
  89 esac
  90 
  91 
  92 # n-column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
  93 alias 1='bsbs 1'
  94 alias 2='bsbs 2'
  95 alias 3='bsbs 3'
  96 alias 4='bsbs 4'
  97 alias 5='bsbs 5'
  98 alias 6='bsbs 6'
  99 alias 7='bsbs 7'
 100 alias 8='bsbs 8'
 101 alias 9='bsbs 9'
 102 alias 0='bsbs 10'
 103 
 104 # alias a=avoid
 105 # alias c=cat
 106 # alias e=echo
 107 # alias f=fetch
 108 # alias g=get
 109 # alias h=naman
 110 # alias m=match
 111 # alias p=plain
 112 # alias q=quiet
 113 # alias r=reset
 114 # alias t=time
 115 # alias y=year
 116 
 117 # find name from the local `apt` database of installable packages
 118 # aptfind() {
 119 #     # despite warnings, the `apt search` command has been around for years
 120 #     # apt search "$1" 2>/dev/null | rg -A 1 "^$1" | sed -u 's/^--$//'
 121 #     apt search "$1" 2>/dev/null | rg -A 1 "^[a-z0-9-]*$1" | sed -u 's/^--$//'
 122 # }
 123 
 124 # emit each argument given as its own line of output
 125 args() { awk 'BEGIN { for (i = 1; i < ARGC; i++) print ARGV[i]; exit }' "$@"; }
 126 
 127 # turn UTF-8 into visible pseudo-ASCII, where variants of latin letters become
 128 # their basic ASCII counterparts, and where non-ASCII symbols become question
 129 # marks, one question mark for each code-point byte
 130 asciify() { iconv -f utf-8 -t ascii//translit "$@"; }
 131 
 132 # avoid/ignore lines which match any of the regexes given
 133 avoid() {
 134     awk '
 135         BEGIN {
 136             for (i = 1; i < ARGC; i++) {
 137                 e[i] = ARGV[i]
 138                 delete ARGV[i]
 139             }
 140         }
 141 
 142         {
 143             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
 144             print; fflush()
 145             got++
 146         }
 147 
 148         END { exit(got == 0) }
 149     ' "${@:-^\r?$}"
 150 }
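     # a quick usage sketch, using a hypothetical `app.log` file: only keep
     # lines which match none of the 2 regexes given
     #   avoid '^DEBUG' '^TRACE' < app.log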
 151 
 152 # AWK Begin
 153 # awkb() { awk "BEGIN { $1; exit }"; }
 154 
 155 # AWK Begin
 156 awkb() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 157 
 158 # emit a line with a repeating ball-like symbol in it
 159 balls() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -●-g'; }
 160 
 161 # show an ansi-styled BANNER-like line
 162 # banner() { printf "\e[7m%s\e[0m\n" "$*"; }
 163 
 164 # show an ansi-styled BANNER-like line
 165 banner() { printf "\e[7m%-$(tput cols)s\e[0m\n" "$*"; }
 166 
 167 # emit a colored bar which can help visually separate different outputs
 168 bar() {
 169     [ "${1:-80}" -gt 0 ] &&
 170         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" ""
 171 }
 172 
 173 # process Blocks/paragraphs of non-empty lines with AWK
 174 # bawk() { awk -F='' -v RS='' "$@"; }
 175 
 176 # process Blocks/paragraphs of non-empty lines with AWK
 177 bawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 178 
 179 # play a repeating and annoying high-pitched beep sound a few times a second,
 180 # lasting the number of seconds given, or for 1 second by default; uses my
 181 # script `waveout`
 182 beeps() {
 183     local f='sin(2_000 * tau * t) * (t % 0.5 < 0.0625)'
 184     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 185 }
 186 
 187 # start by joining all arguments given as a tab-separated-items line of output,
 188 # followed by all lines from stdin verbatim
 189 begintsv() {
 190     awk '
 191         BEGIN {
 192             for (i = 1; i < ARGC; i++) {
 193                 if (i > 1) printf "\t"
 194                 printf "%s", ARGV[i]
 195                 delete ARGV[i]
 196             }
 197             if (ARGC > 1) printf "\n"
 198             fflush()
 199         }
 200         { print; fflush() }
 201     ' "$@"
 202 }
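     # a quick usage sketch, with made-up column names and data: the output is
     # a `name<TAB>score` header line, followed by both stdin lines verbatim
     #   printf 'ana\t10\nbob\t20\n' | begintsv name score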
 203 
 204 # play a repeating synthetic-bell-like sound lasting the number of seconds
 205 # given, or for 1 second by default; uses my script `waveout`
 206 bell() {
 207     local f='sin(880*tau*u) * exp(-10*u)'
 208     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 209 }
 210 
 211 # play a repeating sound with synthetic-bells, lasting the number of seconds
 212 # given, or for 1 second by default; uses my script `waveout`
 213 bells() {
 214     local f="sum(sin(880*tau*v)*exp(-10*v) for v in (u, (u-0.25)%1)) / 2"
 215     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 216 }
 217 
 218 # Breathe Header: add an empty line after the first one (the header), then
 219 # separate groups of 5 lines (by default) with empty lines between them
 220 bh() {
 221     local n="${1:-5}"
 222     [ $# -gt 0 ] && shift
 223     awk -v n="$n" '
 224         BEGIN { if (n == 0) n = -1 }
 225         (NR - 1) % n == 1 && NR > 1 { print "" }
 226         { print; fflush() }
 227     ' "$@"
 228 }
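     # a quick usage sketch: keep the header line from `ps` visually apart,
     # then separate each group of 5 lines after it with an empty line
     #   ps aux | bh | less -JMKiCRS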
 229 
 230 # recursively find all files with at least the number of bytes given; when
 231 # not given a minimum byte-count, the default is 100 binary megabytes
 232 bigfiles() {
 233     local n
 234     n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 235     [ $# -gt 0 ] && shift
 236 
 237     local arg
 238     for arg in "${@:-.}"; do
 239         if [ ! -d "${arg}" ]; then
 240             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 241             return 1
 242         fi
 243         stdbuf -oL find "${arg}" -type f \( -size "$n"c -o -size +"$n"c \)
 244     done
 245 }
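     # a quick usage sketch: find files of at least 250 binary megabytes under
     # your home folder; underscores in the byte-count are optional/ignored
     #   bigfiles 262_144_000 ~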
 246 
 247 # Breathe Lines: separate groups of 5 lines (by default) with empty lines
 248 bl() {
 249     local n="${1:-5}"
 250     [ $# -gt 0 ] && shift
 251     awk -v n="$n" '
 252         BEGIN { if (n == 0) n = -1 }
 253         NR % n == 1 && NR != 1 { print "" }
 254         { print; fflush() }
 255     ' "$@"
 256 }
 257 
 258 # process BLocks/paragraphs of non-empty lines with AWK
 259 # blawk() { awk -F='' -v RS='' "$@"; }
 260 
 261 # process BLocks/paragraphs of non-empty lines with AWK
 262 blawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 263 
 264 # emit a line with a repeating block-like symbol in it
 265 blocks() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -█-g'; }
 266 
 267 # Book-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 268 # my script `bsbs`
 269 bman() {
 270     local w
 271     w="$(tput cols)"
 272     if [ "$w" -gt 120 ]; then
 273         w="$((w / 2 - 1))"
 274     fi
 275     MANWIDTH="$w" man "$@" | bsbs 2
 276 }
 277 
 278 # Begin-Only Awk
 279 # boa() { awk "BEGIN { $1; exit }"; }
 280 
 281 # Begin-Only Awk
 282 boa() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 283 
 284 # Begin-Only AWK
 285 # boawk() { awk "BEGIN { $1; exit }"; }
 286 
 287 # Begin-Only AWK
 288 boawk() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 289 
 290 # BOOK-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 291 # my script `bsbs`
 292 bookman() {
 293     local w
 294     w="$(tput cols)"
 295     if [ "$w" -gt 120 ]; then
 296         w="$((w / 2 - 1))"
 297     fi
 298     MANWIDTH="$w" man "$@" | bsbs 2
 299 }
 300 
 301 # split lines using the regex given, turning them into single-item lines
 302 breakdown() {
 303     local sep="${1:- }"
 304     [ $# -gt 0 ] && shift
 305     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 306 }
 307 
 308 # separate groups of 5 lines (by default) with empty lines
 309 breathe() {
 310     local n="${1:-5}"
 311     [ $# -gt 0 ] && shift
 312     awk -v n="$n" '
 313         BEGIN { if (n == 0) n = -1 }
 314         NR % n == 1 && NR != 1 { print "" }
 315         { print; fflush() }
 316     ' "$@"
 317 }
 318 
 319 # Browse Text
 320 bt() { less -JMKNiCRS "$@"; }
 321 
 322 # show a reverse-sorted tally of all lines read, where ties are sorted
 323 # alphabetically, and where trailing bullets are added to quickly make
 324 # the tally counts comparable at a glance
 325 bully() {
 326     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
 327         # reassure users by instantly showing the header
 328         BEGIN { print "value\ttally\tbullets"; fflush() }
 329 
 330         { gsub(/\r$/, ""); tally[$0]++ }
 331 
 332         END {
 333             # find the max tally, which is needed to build the bullets-string
 334             max = 0
 335             for (k in tally) {
 336                 if (max < tally[k]) max = tally[k]
 337             }
 338 
 339             # make enough bullets for all tallies: this loop makes growing the
 340             # string a task with complexity O(n * log n), instead of a naive
 341             # O(n**2), which can slow-down things when tallies are high enough
 342             bullets = "•"
 343             for (n = max; n > 1; n /= 2) {
 344                 bullets = bullets bullets
 345             }
 346 
 347             # emit unsorted output lines to the sort cmd, which will emit the
 348             # final reverse-sorted tally lines
 349             for (k in tally) {
 350                 s = substr(bullets, 1, tally[k])
 351                 printf("%s\t%d\t%s\n", k, tally[k], s) | sort
 352             }
 353         }
 354     ' "$@"
 355 }
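     # a quick usage sketch, using a hypothetical `visited.txt` file with one
     # entry per line: tally repeated lines, showing the most frequent first
     #   bully < visited.txt | less -JMKiCRS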
 356 
 357 # play a busy-phone-line sound lasting the number of seconds given, or for 1
 358 # second by default; uses my script `waveout`
 359 busy() {
 360     # local f='(u < 0.5) * (sin(480*tau * t) + sin(620*tau * t)) / 2'
 361     local f='min(1, exp(-90*(u-0.5))) * (sin(480*tau*t) + sin(620*tau*t)) / 2'
 362     # local f='(sin(350*tau*t) + sin(450*tau*t)) / 2 * min(1, exp(-90*(u-0.5)))'
 363     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 364 }
 365 
 366 # keep all BUT the FIRST (skip) n lines, or skip just the 1st line by default
 367 butfirst() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
 368 
 369 # keep all BUT the LAST n lines, or skip just the last line by default
 370 butlast() { head -n -"${1:-1}" "${2:--}"; }
 371 
 372 # load bytes from the filenames given
 373 bytes() { cat "$@"; }
 374 
 375 # quick alias for `cat`
 376 c() { cat "$@"; }
 377 
 378 # CAlculator with Nice numbers runs my script `ca` and colors results with
 379 # my script `nn`, alternating styles to make long numbers easier to read
 380 can() { ca "$@" | nn --gray; }
 381 
 382 # uppercase the first letter on each line, and lowercase all later letters
 383 capitalize() {
 384     awk '{ print; fflush() }' "$@" | sed -E 's-^(.*)-\L\1-; s-^(.)-\u\1-'
 385 }
 386 
 387 # conCATenate Lines guarantees no lines are ever accidentally joined
 388 # across inputs, always emitting a line-feed at the end of every line
 389 # catl() { awk '{ print; fflush() }' "$@"; }
 390 
 391 # conCATenate Lines ignores leading byte-order marks on first lines, trailing
 392 # carriage-returns, and guarantees no lines are ever accidentally joined
 393 # across inputs, always emitting a line-feed at the end of every line
 394 catl() {
 395     awk '
 396         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 397         { gsub(/\r$/, ""); print; fflush() }
 398     ' "$@"
 399 }
 400 
 401 # Csv AWK: CSV-specific input settings for `awk`
 402 # cawk() { awk --csv "$@"; }
 403 
 404 # Csv AWK: CSV-specific input settings for `awk`
 405 cawk() { stdbuf -oL awk --csv "$@"; }
 406 
 407 # Compile C Stripped
 408 ccs() { cc -Wall -O2 -s -fanalyzer "$@"; }
 409 
 410 # center-align lines of text, using the current screen width
 411 center() {
 412     awk -v width="$(tput cols)" '
 413         {
 414             gsub(/\r$/, "")
 415             lines[NR] = $0
 416             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
 417             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
 418             l = length
 419             if (maxlen < l) maxlen = l
 420         }
 421 
 422         END {
 423             n = (width - maxlen) / 2
 424             if (n % 1) n = n - (n % 1)
 425             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
 426             for (i = 1; i <= NR; i++) printf fmt, "", lines[i]
 427         }
 428     ' "$@"
 429 }
 430 
 431 # Colored Go Test on the folder given; uses my command `gbmawk`
 432 cgt() { go test "${1:-.}" 2>&1 | gbmawk '/^ok/' '/^[-]* ?FAIL/' '/^\?/'; }
 433 
 434 # ignore final line-feed from text, if it's the very last byte; also ignore
 435 # all trailing carriage-returns
 436 choplf() {
 437     awk '
 438         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 439         NR > 1 { print ""; fflush() }
 440         { gsub(/\r$/, ""); printf "%s", $0; fflush() }
 441     ' "$@"
 442 }
 443 
 444 # Color Json using the `jq` app, allowing an optional filepath as the data
 445 # source, and even an optional transformation formula
 446 cj() { jq -C "${2:-.}" "${1:--}"; }
 447 
 448 # clean the screen, after running the command given
 449 # clean() { tput smcup; "$@"; tput rmcup; }
 450 
 451 # show a live digital clock
 452 clock() { watch -n 1 echo 'Press Ctrl + C to quit this clock'; }
 453 
 454 # Colored Live/Line-buffered RipGrep ensures results show up immediately,
 455 # also emitting colors when piped
 456 clrg() { rg --color=always --line-buffered "$@"; }
 457 
 458 # CLear Screen, like the old dos command of the same name
 459 cls() { clear; }
 460 
 461 # COunt COndition: count how many times the AWK expression given is true
 462 coco() {
 463     local cond="${1:-1}"
 464     [ $# -gt 0 ] && shift
 465     awk "
 466         { low = lower = tolower(\$0) }
 467         ${cond} { count++ }
 468         END { print count }
 469     " "$@"
 470 }
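     # a quick usage sketch, using a hypothetical `build.log` file: the
     # lowercased copy of each line is available to the condition as `low`
     #   coco 'low ~ /warning/' build.log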
 471 
 472 # Colored RipGrep ensures app `rg` emits colors when piped
 473 crg() { rg --color=always --line-buffered "$@"; }
 474 
 475 # emit a line with a repeating cross-like symbol in it
 476 crosses() {
 477     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -×-g'
 478 }
 479 
 480 # split lines using the string given, turning them into single-item lines
 481 crumble() {
 482     local sep="${1:- }"
 483     [ $# -gt 0 ] && shift
 484     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 485 }
 486 
 487 # turn Comma-Separated-Values tables into Tab-Separated-Values tables
 488 csv2tsv() { xsv fmt -t '\t' "$@"; }
 489 
 490 # Change Units turns common US units into international ones; uses my
 491 # scripts `bu` (Better Units) and `nn` (Nice Numbers)
 492 cu() {
 493     bu "$@" | awk '
 494         NF == 5 || (NF == 4 && $NF == "s") { print $(NF-1), $NF }
 495         NF == 4 && $NF != "s" { print $NF }
 496     ' | nn --gray
 497 }
 498 
 499 # CURL Silent spares you the progress bar, but still tells you about errors
 500 curls() { curl --show-error -s "$@"; }
 501 
 502 # Count With AWK: count the times the AWK expression/condition given is true
 503 cwawk() {
 504     local cond="${1:-1}"
 505     [ $# -gt 0 ] && shift
 506     awk "
 507         { low = lower = tolower(\$0) }
 508         ${cond} { count++ }
 509         END { print count }
 510     " "$@"
 511 }
 512 
 513 # listen to streaming DANCE music
 514 dance() {
 515     printf "streaming \e[7mDance Wave Retro\e[0m\n"
 516     # mpv --quiet https://retro.dancewave.online/retrodance.mp3
 517     mpv --really-quiet https://retro.dancewave.online/retrodance.mp3
 518 }
 519 
 520 # emit a line with a repeating dash-like symbol in it
 521 dashes() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -—-g'; }
 522 
 523 # DEcode BASE64-encoded data, or even base64-encoded data-URIs, by ignoring
 524 # the leading data-URI declaration, if present
 525 debase64() { sed -E 's-^data:.{0,50};base64,--' "${1:--}" | base64 -d; }
 526 
 527 # DECAPitate (lines) emits the first line as is, piping all lines after that
 528 # to the command given, passing all/any arguments/options to it
 529 # decap() {
 530 #     awk -v cmd="$*" 'NR == 1 { print; fflush() } NR > 1 { print | cmd }'
 531 # }
 532 
 533 # turn Comma-Separated-Values tables into tab-separated-values tables
 534 # decsv() { xsv fmt -t '\t' "$@"; }
 535 
 536 # DEDUPlicate prevents lines from appearing more than once
 537 dedup() { awk '!c[$0]++ { print; fflush() }' "$@"; }
 538 
 539 # dictionary-DEFine the word given, using an online service
 540 def() {
 541     local arg
 542     local gap=0
 543     for arg in "$@"; do
 544         [ "${gap}" -gt 0 ] && printf "\n"
 545         gap=1
 546         printf "\e[7m%-80s\x1b[0m\n" "${arg}"
 547         curl -s "dict://dict.org/d:${arg}" | awk '
 548             { gsub(/\r$/, "") }
 549             /^151 / {
 550                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 551                 next
 552             }
 553             /^[1-9][0-9]{2} / {
 554                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 555                 next
 556             }
 557             { print; fflush() }
 558         '
 559     done | less -JMKiCRS
 560 }
 561 
 562 # dictionary-define the word given, using an online service
 563 define() {
 564     local arg
 565     local gap=0
 566     for arg in "$@"; do
 567         [ "${gap}" -gt 0 ] && printf "\n"
 568         gap=1
 569         printf "\e[7m%-80s\x1b[0m\n" "${arg}"
 570         curl -s "dict://dict.org/d:${arg}" | awk '
 571             { gsub(/\r$/, "") }
 572             /^151 / {
 573                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 574                 next
 575             }
 576             /^[1-9][0-9]{2} / {
 577                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 578                 next
 579             }
 580             { print; fflush() }
 581         '
 582     done | less -JMKiCRS
 583 }
 584 
 585 # DEcompress GZip-encoded data
 586 # degz() { zcat "$@"; }
 587 
 588 # turn JSON Lines into a proper json array
 589 dejsonl() { jq -s -M "${@:-.}"; }
 590 
 591 # delay lines from the standard-input, waiting the number of seconds given
 592 # for each line, or waiting 1 second by default
 593 # delay() {
 594 #     local seconds="${1:-1}"
 595 #     (
 596 #         IFS="$(printf "\n")"
 597 #         while read -r line; do
 598 #             sleep "${seconds}"
 599 #             printf "%s\n" "${line}"
 600 #         done
 601 #     )
 602 # }
 603 
 604 # expand each tab into up to the number of spaces given, or 4 by default
 605 detab() { expand -t "${1:-4}"; }
 606 
 607 # ignore trailing spaces, as well as trailing carriage returns
 608 detrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
 609 
 610 # turn UTF-16 data into UTF-8
 611 deutf16() { iconv -f utf16 -t utf8 "$@"; }
 612 
 613 # DIVide 2 numbers 3 ways, including the complement
 614 div() {
 615     awk -v a="${1:-1}" -v b="${2:-1}" '
 616         BEGIN {
 617             gsub(/_/, "", a)
 618             gsub(/_/, "", b)
 619             if (a > b) { c = a; a = b; b = c }
 620             c = 1 - a / b
 621             if (0 <= c && c <= 1) printf "%f\n%f\n%f\n", a / b, b / a, c
 622             else printf "%f\n%f\n", a / b, b / a
 623             exit
 624         }'
 625 }
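     # for example, `div 3 4` emits 3 lines: 0.750000 (3/4), 1.333333 (4/3),
     # and 0.250000 (the complement, 1 - 3/4)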
 626 
 627 # get/fetch data from the filename or URI given; named `dog` because dogs can
 628 # `fetch` things for you
 629 # dog() {
 630 #     if [ $# -gt 1 ]; then
 631 #         printf "\e[31mdogs only have 1 mouth to fetch with\e[0m\n" >&2
 632 #         return 1
 633 #     fi
 634 #
 635 #     if [ -e "$1" ]; then
 636 #         cat "$1"
 637 #         return $?
 638 #     fi
 639 #
 640 #     case "${1:--}" in
 641 #         -) cat -;;
 642 #         file://*|https://*|http://*) curl --show-error -s "$1";;
 643 #         ftp://*|ftps://*|sftp://*) curl --show-error -s "$1";;
 644 #         dict://*|telnet://*) curl --show-error -s "$1";;
 645 #         data:*) echo "$1" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 646 #         *) curl --show-error -s "https://$1";;
 647 #     esac 2> /dev/null || {
 648 #         printf "\e[31mcan't fetch %s\e[0m\n" "${1:--}" >&2
 649 #         return 1
 650 #     }
 651 # }
 652 
 653 # emit a line with a repeating dot-like symbol in it
 654 dots() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -·-g'; }
 655 
 656 # ignore/remove all matched regexes given on all stdin lines
 657 drop() {
 658     awk '
 659         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 660         {
 661             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 662             print; fflush()
 663         }
 664     ' "${@:-\r$}"
 665 }
 666 
 667 # show the current Date and Time
 668 dt() {
 669     printf "\e[32m%s\e[0m  \e[34m%s\e[0m\n" "$(date +'%a %b %d')" "$(date +%T)"
 670 }
 671 
 672 # show the current Date, Time, and a Calendar with the 3 `current` months
 673 dtc() {
 674     {
 675         # show the current date/time center-aligned
 676         printf "%22s\e[32m%s\e[0m  \e[34m%s\e[0m\n\n" \
 677             "" "$(date +'%a %b %d')" "$(date +%T)"
 678         # debian linux has a different `cal` app which highlights the day
 679         if [ -e "/usr/bin/ncal" ]; then
 680             # fix debian/ncal's weird way to highlight the current day
 681             ncal -C -3 | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
 682         else
 683             cal -3
 684         fi
 685     } | less -JMKiCRS
 686 }
 687 
 688 # quick alias for `echo`
 689 e() { echo "$@"; }
 690 
     # expand each tab into up to 4 spaces
 691 e4() { expand -t 4 "$@"; }
 692 
     # expand each tab into up to 8 spaces
 693 e8() { expand -t 8 "$@"; }
 694 
 695 # Evaluate Awk expression
 696 ea() {
 697     local expr="${1:-0}"
 698     [ $# -gt 0 ] && shift
 699     awk "BEGIN { print ${expr}; exit }" "$@"
 700 }
 701 
 702 # EDit RUN shell commands, using an interactive editor
 703 edrun() { . <( micro -readonly true -filetype shell | leak --inv ); }
 704 
 705 # Extended-mode Grep, enabling its full regex syntax
 706 eg() { grep -E --line-buffered "$@"; }
 707 
 708 # Extended Grep, Recursive Interactive and Plain
 709 # egrip() { ugrep -r -Q --color=never -E "$@"; }
 710 
 711 # show all empty files in a folder, digging recursively
 712 emptyfiles() {
 713     local arg
 714     for arg in "${@:-.}"; do
 715         if [ ! -d "${arg}" ]; then
 716             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 717             return 1
 718         fi
 719         stdbuf -oL find "${arg}" -type f -size 0c
 720     done
 721 }
 722 
 723 # Evaluate Nodejs expression
 724 # en() {
 725 #     local expr="${1:-null}"
 726 #     expr="$(echo "${expr}" | sed 's-\\-\\\\-g; s-`-\`-g')"
 727 #     node -e "console.log(${expr})" | sed 's-\x1b\[[^A-Za-z]+[A-Za-z]--g'
 728 # }
 729 
 730 # Evaluate Python expression
 731 ep() { python -c "print(${1:-None})"; }
 732 
 733 # Extended Plain Interactive Grep
 734 epig() { ugrep --color=never -Q -E "$@"; }
 735 
 736 # Extended Plain Recursive Interactive Grep
 737 eprig() { ugrep -r --color=never -Q -E "$@"; }
 738 
 739 # Evaluate Ruby expression
 740 # er() { ruby -e "puts ${1:-nil}"; }
 741 
 742 # Edit Run shell commands, using an interactive editor
 743 er() { . <( micro -readonly true -filetype shell | leak --inv ); }
 744 
 745 # ignore/remove all matched regexes given on all stdin lines
 746 erase() {
 747     awk '
 748         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 749         {
 750             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 751             print; fflush()
 752         }
 753     ' "${@:-\r$}"
 754 }
 755 
 756 # Editor Read-Only
 757 ero() { micro -readonly true "$@"; }
 758 
 759 # Extended-mode Sed, enabling its full regex syntax
 760 es() { sed -E -u "$@"; }
 761 
 762 # Expand Tabs, each into up to the number of spaces given, or 4 by default
 763 et() { expand -t "${1:-4}"; }
 764 
 765 # convert EURos into CAnadian Dollars, using the latest official exchange
 766 # rates from the bank of canada; during weekends, the latest rate may be
 767 # from a few days ago; the default amount of euros to convert is 1, when
 768 # not given
 769 eur2cad() {
 770     local site='https://www.bankofcanada.ca/valet/observations/group'
 771     local csv_rates="${site}/FX_RATES_DAILY/csv"
 772     local url
 773     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
 774     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
 775         /EUR/ { for (i = 1; i <= NF; i++) if($i ~ /EUR/) j = i }
 776         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
 777 }
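     # a quick usage sketch: underscores in the amount are optional/ignored,
     # which helps keep big numbers readable
     #   eur2cad 1_500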
 778 
 779 # EValuate AWK expression
 780 evawk() {
 781     local expr="${1:-0}"
 782     [ $# -gt 0 ] && shift
 783     awk "BEGIN { print ${expr}; exit }" "$@"
 784 }
 785 
 786 # convert fahrenheit into celsius
 787 fahrenheit() {
 788     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' |
 789         awk '/./ { printf "%.2f\n", ($0 - 32) * 5.0/9.0 }'
 790 }
 791 
 792 # Flushed AWK
 793 fawk() { stdbuf -oL awk "$@"; }
 794 
 795 # fetch/web-request all URIs given, using protocol HTTPS when none is given
 796 fetch() {
 797     local a
 798     for a in "$@"; do
 799         case "$a" in
 800             file://*|https://*|http://*) curl --show-error -s "$a";;
 801             ftp://*|ftps://*|sftp://*) curl --show-error -s "$a";;
 802             dict://*|telnet://*) curl --show-error -s "$a";;
 803             data:*) echo "$a" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 804             *) curl --show-error -s "https://$a";;
 805         esac
 806     done
 807 }
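     # a quick usage sketch, using made-up addresses: the first URI below gets
     # the HTTPS protocol, since it doesn't declare any
     #   fetch example.org/data.json http://example.org/notes.txt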
 808 
 809 # run the Fuzzy Finder (fzf) in multi-choice mode, with custom keybindings
 810 ff() { fzf -m --bind ctrl-a:select-all,ctrl-space:toggle "$@"; }
 811 
 812 # show all files in a folder, digging recursively
 813 files() {
 814     local arg
 815     for arg in "${@:-.}"; do
 816         if [ ! -d "${arg}" ]; then
 817             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 818             return 1
 819         fi
 820         stdbuf -oL find "${arg}" -type f
 821     done
 822 }
 823 
 824 # recursively find all files with fewer bytes than the number given
 825 filesunder() {
 826     local n
 827     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 828     [ $# -gt 0 ] && shift
 829 
 830     local arg
 831     for arg in "${@:-.}"; do
 832         if [ ! -d "${arg}" ]; then
 833             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 834             return 1
 835         fi
 836         stdbuf -oL find "${arg}" -type f -size -"$n"c
 837     done
 838 }
 839 
 840 # get the first n lines, or 1 by default
 841 first() { head -n "${1:-1}" "${2:--}"; }
 842 
 843 # limit data up to the first n bytes
 844 firstbytes() { head -c "$1" "${2:--}"; }
 845 
 846 # get the first n lines, or 1 by default
 847 firstlines() { head -n "${1:-1}" "${2:--}"; }
 848 
 849 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
 850 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
 851 # and ensuring each input's last line ends with a line-feed
 852 fixlines() {
 853     awk '
 854         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 855         { gsub(/\r$/, ""); print; fflush() }
 856     ' "$@"
 857 }
 858 
 859 # FLushed AWK
 860 # flawk() { stdbuf -oL awk "$@"; }
 861 
 862 # First Line AWK emits the first line as is, injects its first argument into
 863 # the AWK script which handles all later lines, and passes any remaining
 864 # arguments to `awk` as given
 865 flawk() {
 866     local code="${1:-1}"
 867     [ $# -gt 0 ] && shift
 868     stdbuf -oL awk "NR == 1 { print; fflush(); next } ${code}" "$@"
 869 }
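     # a quick usage sketch: keep the header line from `ps`, along with all
     # later lines matching the AWK pattern given
     #   ps aux | flawk '/ssh/'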
 870 
 871 # Faint LEAK emits/tees input both to stdout and stderr, coloring gray what
 872 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
 873 # involving several steps
 874 fleak() {
 875     awk '
 876         {
 877             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
 878             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0 > "/dev/stderr"
 879             print; fflush()
 880         }
 881     ' "$@"
 882 }
 883 
 884 # try to run the command given using line-buffering for its (standard) output
 885 flushlines() { stdbuf -oL "$@"; }
 886 
 887 # show all folders in a folder, digging recursively
 888 folders() {
 889     local arg
 890     for arg in "${@:-.}"; do
 891         if [ ! -d "${arg}" ]; then
 892             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 893             return 1
 894         fi
 895         stdbuf -oL find "${arg}" -type d | awk '!/^\.$/ { print; fflush() }'
 896     done
 897 }
 898 
 899 # start from the line number given, skipping all previous ones
 900 fromline() { tail -n +"${1:-1}" "${2:--}"; }
 901 
 902 # convert FeeT into meters
 903 ft() {
 904     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 905         awk '/./ { printf "%.2f\n", 0.3048 * $0; fflush() }'
 906 }
 907 
 908 # convert FeeT² (squared) into meters²
 909 ft2() {
 910     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 911         awk '/./ { printf "%.2f\n", 0.09290304 * $0 }'
 912 }
 913 
 914 # Get/fetch data from the filenames/URIs given; uses my script `get`
 915 # g() { get "$@"; }
 916 
 917 # run `grep` in extended-regex mode, enabling its full regex syntax
 918 # g() { grep -E --line-buffered "$@"; }
 919 
 920 # convert GALlons into liters
 921 gal() {
 922     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 923         awk '/./ { printf "%.2f\n", 3.785411784 * $0; fflush() }'
 924 }
 925 
 926 # convert binary GigaBytes into bytes
 927 gb() {
 928     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 929         awk '/./ { printf "%.4f\n", 1073741824 * $0; fflush() }' |
 930         sed 's-\.00*$--'
 931 }
 932 
 933 # glue/stick together various lines, only emitting a line-feed at the end; an
 934 # optional argument is the output-item-separator, which is empty by default
 935 glue() {
 936     local sep="${1:-}"
 937     [ $# -gt 0 ] && shift
 938     awk -v sep="${sep}" '
 939         NR > 1 { printf "%s", sep }
 940         { gsub(/\r/, ""); printf "%s", $0; fflush() }
 941         END { if (NR > 0) print ""; fflush() }
 942     ' "$@"
 943 }
 944 
 945 # GO Build Stripped: a common use-case for the go compiler
 946 gobs() { go build -ldflags "-s -w" -trimpath "$@"; }
 947 
 948 # GO DEPendencieS: show all dependencies in a go project
 949 godeps() { go list -f '{{ join .Deps "\n" }}' "$@"; }
 950 
 951 # GO IMPortS: show all imports in a go project
 952 goimps() { go list -f '{{ join .Imports "\n" }}' "$@"; }
 953 
 954 # go to the folder picked using an interactive TUI; uses my script `bf`
 955 goto() {
 956     local where
 957     where="$(bf "${1:-.}")"
 958     if [ $? -ne 0 ]; then
 959         return 0
 960     fi
 961 
 962     where="$(realpath "${where}")"
 963     if [ ! -d "${where}" ]; then
 964         where="$(dirname "${where}")"
 965     fi
 966     cd "${where}" || return
 967 }
 968 
 969 # GRayed-out lines with AWK
 970 grawk() {
 971     local cond="${1:-1}"
 972     [ $# -gt 0 ] && shift
 973     awk "${cond}"' {
 974             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m")
 975             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush()
 976             next
 977         }
 978         { print; fflush() }
 979     ' "$@"
 980 }
 981 
 982 # Style lines using a GRAY-colored BACKground
 983 grayback() {
 984     awk '
 985         {
 986             gsub(/\x1b\[0m/, "\x1b[0m\x1b[48;2;218;218;218m")
 987             printf "\x1b[48;2;218;218;218m%s\x1b[0m\n", $0; fflush()
 988         }
 989     ' "$@"
 990 }
 991 
 992 # Grep, Recursive Interactive and Plain
 993 # grip() { ugrep -r -Q --color=never -E "$@"; }
 994 
 995 # Global extended regex SUBstitute, using the AWK function of the same name:
 996 # arguments are used as regex/replacement pairs, in that order
 997 gsub() {
 998     awk '
 999         BEGIN {
1000             for (i = 1; i < ARGC; i++) {
1001                 args[++n] = ARGV[i]
1002                 delete ARGV[i]
1003             }
1004         }
1005         {
1006             for (i = 1; i <= n; i += 2) gsub(args[i], args[i + 1])
1007             print; fflush()
1008         }
1009     ' "$@"
1010 }
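     # a quick usage sketch, using a hypothetical `notes.txt` file: the
     # regex/replacement pairs apply to each line, in the order given
     #   gsub 'colour' 'color' '[0-9]+' '#' < notes.txt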
1011 
1012 # Highlight (lines) with AWK
1013 hawk() {
1014     local cond="${1:-1}"
1015     [ $# -gt 0 ] && shift
1016     awk '
1017         { low = lower = tolower($0) }
1018         '"${cond}"' {
1019             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1020             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1021             next
1022         }
1023         { print; fflush() }
1024     ' "$@"
1025 }
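     # a quick usage sketch: highlight lines whose lowercased copy, available
     # to the condition as `low`, mentions either errors or warnings
     #   make 2>&1 | hawk 'low ~ /error|warning/'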
1026 
1027 # play a heartbeat-like sound lasting the number of seconds given, or for 1
1028 # second by default; uses my script `waveout`
1029 heartbeat() {
1030     local a='sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1])'
1031     local b='((12, u), (8, (u-0.25)%1))'
1032     local f="sum($a for v in $b) / 2"
1033     # local f='sum(sin(10*tau*exp(-20*v))*exp(-2*v) for v in (u, (u-0.25)%1))/2'
1034     # local f='sum(sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1]) for v in ((12, u), (8, (u-0.25)%1)))/2'
1035     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1036 }
1037 
1038 # Highlighted-style ECHO
1039 hecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1040 
1041 # show each byte as a pair of HEXadecimal (base-16) symbols
1042 hexify() {
1043     cat "$@" | od -v -t x1 -A n |
1044         awk '{ gsub(/ +/, ""); printf "%s", $0; fflush() } END { printf "\n" }'
1045 }
1046 
1047 # HIghlighted-style ECHO
1048 hiecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1049 
1050 # highlight lines
1051 highlight() {
1052     awk '
1053         {
1054             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1055             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1056         }
1057     ' "$@"
1058 }
1059 
1060 # HIghlight LEAK emits/tees input both to stdout and stderr, highlighting what
1061 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
1062 # involving several steps
1063 hileak() {
1064     awk '
1065         {
1066             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
1067             printf "\x1b[7m%s\x1b[0m\n", $0 > "/dev/stderr"
1068             print; fflush()
1069         }
1070     ' "$@"
1071 }
1072 
1073 # highlight lines
1074 hilite() {
1075     awk '
1076         {
1077             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1078             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1079         }
1080     ' "$@"
1081 }
1082 
1083 # Help Me Remember my custom shell commands
1084 hmr() {
1085     local cmd="bat"
1086     # debian linux uses a different name for the `bat` app
1087     if [ -e "/usr/bin/batcat" ]; then
1088         cmd="batcat"
1089     fi
1090 
1091     "$cmd" \
1092         --style=plain,header,numbers --theme='Monokai Extended Light' \
1093         --wrap=never --color=always "$(which clam)" |
1094             sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' | less -JMKiCRS
1095 }
1096 
1097 # convert seconds into a colon-separated Hours-Minutes-Seconds triple
1098 hms() {
1099     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | awk '/./ {
1100         x = $0
1101         h = (x - x % 3600) / 3600
1102         m = (x % 3600) / 60
1103         s = x % 60
1104         printf "%02d:%02d:%05.2f\n", h, m, s; fflush()
1105     }'
1106 }
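     # for example, `hms 3725` emits 01:02:05.00, and `hms 3725 45` emits that
     # same line, followed by 00:00:45.00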
1107 
1108 # find all hyperlinks inside HREF attributes in the input text
1109 href() {
1110     awk '
1111         BEGIN { e = "href=\"[^\"]+\"" }
1112         {
1113             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1114                 print substr(s, RSTART + 6, RLENGTH - 7); fflush()
1115             }
1116         }
1117     ' "$@"
1118 }
1119 
1120 # Index all lines starting from 0, using a tab right after each line number
1121 # i() {
1122 #     local start="${1:-0}"
1123 #     [ $# -gt 0 ] && shift
1124 #     nl -b a -w 1 -v "${start}" "$@"
1125 # }
1126 
1127 # Index all lines starting from 0, using a tab right after each line number
1128 i() { stdbuf -oL nl -b a -w 1 -v 0 "$@"; }
1129 
1130 # avoid/ignore lines which case-insensitively match any of the regexes given
1131 iavoid() {
1132     awk '
1133         BEGIN {
1134             if (IGNORECASE == "") {
1135                 m = "this variant of AWK lacks case-insensitive regex-matching"
1136                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1137                 exit 125
1138             }
1139             IGNORECASE = 1
1140 
1141             for (i = 1; i < ARGC; i++) {
1142                 e[i] = ARGV[i]
1143                 delete ARGV[i]
1144             }
1145         }
1146 
1147         {
1148             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
1149             print; fflush(); got++
1150         }
1151 
1152         END { exit(got == 0) }
1153     ' "${@:-^\r?$}"
1154 }
1155 
1156 # case-Insensitively DEDUPlicate prevents lines from appearing more than once
1157 idedup() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1158 
1159 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1160 idrop() {
1161     awk '
1162         BEGIN {
1163             if (IGNORECASE == "") {
1164                 m = "this variant of AWK lacks case-insensitive regex-matching"
1165                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1166                 exit 125
1167             }
1168             IGNORECASE = 1
1169 
1170             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1171         }
1172 
1173         {
1174             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1175             print; fflush()
1176         }
1177     ' "${@:-\r$}"
1178 }
1179 
1180 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1181 ierase() {
1182     awk '
1183         BEGIN {
1184             if (IGNORECASE == "") {
1185                 m = "this variant of AWK lacks case-insensitive regex-matching"
1186                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1187                 exit 125
1188             }
1189             IGNORECASE = 1
1190 
1191             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1192         }
1193 
1194         {
1195             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1196             print; fflush()
1197         }
1198     ' "${@:-\r$}"
1199 }
1200 
1201 # ignore command in a pipe: this allows quick re-editing of pipes, while
1202 # still leaving signs of previously-used steps, as a memo
1203 ignore() { cat; }
1204 
1205 # only keep lines which case-insensitively match any of the regexes given
1206 imatch() {
1207     awk '
1208         BEGIN {
1209             if (IGNORECASE == "") {
1210                 m = "this variant of AWK lacks case-insensitive regex-matching"
1211                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1212                 exit 125
1213             }
1214             IGNORECASE = 1
1215 
1216             for (i = 1; i < ARGC; i++) {
1217                 e[i] = ARGV[i]
1218                 delete ARGV[i]
1219             }
1220         }
1221 
1222         {
1223             for (i = 1; i < ARGC; i++) {
1224                 if ($0 ~ e[i]) {
1225                     print; fflush()
1226                     got++
1227                     next
1228                 }
1229             }
1230         }
1231 
1232         END { exit(got == 0) }
1233     ' "${@:-[^\r]}"
1234 }
1235 
1236 # start each non-empty line with n extra spaces
1237 indent() {
1238     awk '
1239         BEGIN {
1240             n = ARGV[1] + 0
1241             delete ARGV[1]
1242             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
1243         }
1244 
1245         /^\r?$/ { print ""; fflush(); next }
1246         { gsub(/\r$/, ""); printf(fmt, "", $0); fflush() }
1247     ' "$@"
1248 }
1249 
1250 # listen to INTENSE streaming radio
1251 intense() {
1252     printf "streaming \e[7mIntense Radio\e[0m\n"
1253     mpv --quiet https://secure.live-streams.nl/flac.flac
1254 }
1255 
1256 # emit each word-like item from each input line on its own line; when a file
1257 # has tabs on its first line, items are split using tabs alone, which allows
1258 # items to have spaces in them
1259 items() {
1260     awk '
1261         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
1262         { gsub(/\r$/, ""); for (i = 1; i <= NF; i++) print $i; fflush() }
1263     ' "$@"
1264 }
1265 
1266 # case-insensitively deduplicate lines, keeping them in their original order:
1267 # the checking/matching is case-insensitive, but each first match is output
1268 # exactly as is
1269 iunique() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1270 
1271 # shrink/compact Json data, allowing an optional filepath
1272 # j0() { python -m json.tool --compact "${1:--}"; }
1273 
1274 # shrink/compact Json using the `jq` app, allowing an optional filepath, and
1275 # even an optional transformation formula after that
1276 # j0() { jq -c -M "${2:-.}" "${1:--}"; }
1277 
1278 # show Json data on multiple lines, using 2 spaces for each indentation level,
1279 # allowing an optional filepath
1280 # j2() { python -m json.tool --indent 2 "${1:--}"; }
1281 
1282 # show Json data on multiple lines, using 2 spaces for each indentation level,
1283 # allowing an optional filepath, and even an optional transformation formula
1284 # after that
1285 # j2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1286 
1287 # listen to streaming JAZZ music
1288 jazz() {
1289     printf "streaming \e[7mSmooth Jazz Instrumental\e[0m\n"
1290     # mpv https://stream.zeno.fm/00rt0rdm7k8uv
1291     mpv --quiet https://stream.zeno.fm/00rt0rdm7k8uv
1292 }
1293 
1294 # show a `dad` JOKE from the web, sometimes even a very funny one
1295 # joke() {
1296 #     curl -s https://icanhazdadjoke.com | fold -s | sed -E 's- *\r?$--'
1297 #     # plain-text output from previous cmd doesn't end with a line-feed
1298 #     printf "\n"
1299 # }
1300 
1301 # show a `dad` JOKE from the web, sometimes even a very funny one
1302 joke() {
1303     curl --show-error -s https://icanhazdadjoke.com | fold -s |
1304         awk '{ gsub(/ *\r?$/, ""); print }'
1305 }
1306 
1307 # shrink/compact JSON data, allowing an optional filepath
1308 # json0() { python -m json.tool --compact "${1:--}"; }
1309 
1310 # shrink/compact JSON using the `jq` app, allowing an optional filepath, and
1311 # even an optional transformation formula after that
1312 json0() { jq -c -M "${2:-.}" "${1:--}"; }
1313 
1314 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1315 # allowing an optional filepath
1316 # json2() { python -m json.tool --indent 2 "${1:--}"; }
1317 
1318 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1319 # allowing an optional filepath, and even an optional transformation formula
1320 # after that
1321 json2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1322 
1323 # turn JSON Lines into a proper JSON array
1324 jsonl2json() { jq -s -M "${@:-.}"; }
1325 
1326 # emit the given number of random/junk bytes, or 1024 junk bytes by default
1327 junk() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" /dev/urandom; }
1328 
1329 # only keep the file-extension part from lines ending with file-extensions
1330 # justext() {
1331 #     awk '
1332 #         !/^\./ && /\./ { gsub(/^.+\.+/, ""); printf ".%s\n", $0; fflush() }
1333 #     ' "$@"
1334 # }
1335 
1336 # only keep the file-extension part from lines ending with file-extensions
1337 justext() {
1338     awk '
1339         !/^\./ && /\./ {
1340             if (match($0, /((\.[A-Za-z0-9]+)+) *\r?$/)) {
1341                 print substr($0, RSTART, RLENGTH); fflush()
1342             }
1343         }
1344     ' "$@"
1345 }
1346 
1347 # only keep lines ending with a file-extension of any popular picture format
1348 justpictures() {
1349     awk '
1350         /.\.(bmp|gif|heic|ico|jfif|jpe?g|png|svg|tiff?|webp) *\r?$/ {
1351             gsub(/ *\r?$/, ""); print; fflush()
1352         }
1353     ' "$@"
1354 }
1355 
1356 # only keep lines ending with a file-extension of any popular sound format
1357 justsounds() {
1358     awk '
1359         /.\.(aac|aif[cf]?|au|flac|m4a|m4b|mp[23]|ogg|snd|wav|wma) *\r?$/ {
1360             gsub(/ *\r?$/, ""); print; fflush()
1361         }
1362     ' "$@"
1363 }
1364 
1365 # only keep lines ending with a file-extension of any popular video format
1366 justvideos() {
1367     awk '
1368         /.\.(avi|mkv|mov|mp4|mpe?g|ogv|webm|wmv) *\r?$/ {
1369             gsub(/ *\r?$/, ""); print; fflush()
1370         }
1371     ' "$@"
1372 }
1373 
1374 # convert binary KiloBytes into bytes
1375 kb() {
1376     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1377         awk '/./ { printf "%.2f\n", 1024 * $0; fflush() }' |
1378         sed 's-\.00*$--'
1379 }
1380 
1381 # run `less`, showing line numbers, among other settings
1382 l() { less -JMKNiCRS "$@"; }
1383 
1384 # Like A Book groups lines as 2 side-by-side pages, the same way books
1385 # do it; uses my script `book`
1386 lab() { book "$(($(tput lines) - 1))" "$@" | less -JMKiCRS; }
1387 
1388 # find the LAN (local-area network) IP address for this device
1389 lanip() { hostname -I; }
1390 
1391 # Line xARGS: `xargs` using line separators, which handles filepaths
1392 # with spaces, as long as the standard input has 1 path per line
1393 # largs() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
1394 
1395 # get the last n lines, or 1 by default
1396 # last() { tail -n "${1:-1}" "${2:--}"; }
1397 
1398 # get up to the last given number of bytes
1399 lastbytes() { tail -c "${1:-1}" "${2:--}"; }
1400 
1401 # get the last n lines, or 1 by default
1402 lastlines() { tail -n "${1:-1}" "${2:--}"; }
1403 
1404 # turn UTF-8 into its latin-like subset, where variants of latin letters stay
1405 # as given, and where all other symbols become question marks, one question
1406 # mark for each code-point byte
1407 latinize() {
1408     iconv -f utf-8 -t latin-1//translit "$@" | iconv -f latin-1 -t utf-8
1409 }
1410 
1411 # Lowercased (lines) AWK
1412 lawk() {
1413     local code="${1:-1}"
1414     [ $# -gt 0 ] && shift
1415     awk "
1416         {
1417             line = orig = original = \$0
1418             low = lower = tolower(\$0)
1419             \$0 = lower
1420         }
1421         ${code}
1422         { fflush() }
1423     " "$@";
1424 }
1425 
1426 # convert pounds (LB) into kilograms
1427 lb() {
1428     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1429         awk '/./ { printf "%.2f\n", 0.45359237 * $0; fflush() }'
1430 }
1431 
1432 # turn the first n space-separated fields on each line into tab-separated
1433 # ones; this behavior is useful to make the output of many cmd-line tools
1434 # into TSV, since filenames are usually the last fields, and these may
1435 # contain spaces which aren't meant to be split into different fields
1436 leadtabs() {
1437     local n="$1"
1438     local cmd
1439     cmd="$([ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "")"
1440     cmd="s-^ *--; s- *\\r?\$--; $(echo "${cmd}" | sed 's/ /s- +-\\t-1;/g')"
1441     sed -u -E "${cmd}"
1442 }
1443 
1444 # run `less`, showing line numbers, among other settings
1445 least() { less -JMKNiCRS "$@"; }
1446 
1447 # limit stops at the first n bytes, or 1024 bytes by default
1448 limit() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" "${2:--}"; }
1449 
1450 # Less with Header runs `less` with line numbers, ANSI styles, no line-wraps,
1451 # and using the first n lines as a sticky-header (1 by default), so they
1452 # always show on top
1453 lh() {
1454     local n="${1:-1}"
1455     [ $# -gt 0 ] && shift
1456     less --header="$n" -JMKNiCRS "$@"
1457 }
1458 
1459 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
1460 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
1461 # and ensuring each input's last line ends with a line-feed
1462 lines() {
1463     awk '
1464         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1465         { gsub(/\r$/, ""); print; fflush() }
1466     ' "$@"
1467 }
1468 
1469 # regroup adjacent lines into n-item tab-separated lines
1470 lineup() {
1471     local n="${1:-0}"
1472     [ $# -gt 0 ] && shift
1473 
1474     if [ "$n" -le 0 ]; then
1475         awk '
1476             NR > 1 { printf "\t" }
1477             { printf "%s", $0; fflush() }
1478             END { if (NR > 0) print "" }
1479         ' "$@"
1480         return $?
1481     fi
1482 
1483     awk -v n="$n" '
1484         NR % n != 1 && n > 1 { printf "\t" }
1485         { printf "%s", $0; fflush() }
1486         NR % n == 0 { print ""; fflush() }
1487         END { if (NR % n != 0) print "" }
1488     ' "$@"
1489 }
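     # for example, `seq 6 | lineup 3` emits 2 tab-separated lines, which are
     # 1<TAB>2<TAB>3 and 4<TAB>5<TAB>6, while `seq 6 | lineup` (or `lineup 0`)
     # joins all 6 input lines into a single tab-separated line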
1490 
1491 # find all hyperLINKS (https:// and http://) in the input text
1492 links() {
1493     awk '
1494         BEGIN { e = "https?://[A-Za-z0-9+_.:%-]+(/[A-Za-z0-9+_.%/,#?&=-]*)*" }
1495         {
1496             # match all links in the current line
1497             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1498                 print substr(s, RSTART, RLENGTH); fflush()
1499             }
1500         }
1501     ' "$@"
1502 }
1503 
1504 # List files, using the `Long` option
1505 # ll() { ls -l "$@"; }
1506 
1507 # LOAD data from the filename or URI given; uses my script `get`
1508 load() { get "$@"; }
1509 
1510 # LOwercase line, check (awk) COndition: on each success, the original line
1511 # is output with its original letter-casing, as its lower-cased version is
1512 # only a convenience meant for the condition
1513 loco() {
1514     local cond="${1:-1}"
1515     [ $# -gt 0 ] && shift
1516     awk "
1517         {
1518             line = orig = original = \$0
1519             low = lower = tolower(\$0)
1520             \$0 = lower
1521         }
1522         ${cond} { print line; fflush() }
1523     " "$@"
1524 }
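     # a quick usage sketch, using a hypothetical `app.log` file: match lines
     # case-insensitively, yet emit each match with its original letter-casing
     #   loco '/error/' app.log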
1525 
1526 # LOcal SERver webserves files in a folder as localhost, using the port
1527 # number given, or port 8080 by default
1528 loser() {
1529     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
1530     python3 -m http.server "${1:-8080}" -d "${2:-.}"
1531 }
1532 
1533 # LOWercase all ASCII symbols
1534 low() { awk '{ print tolower($0); fflush() }' "$@"; }
1535 
1536 # LOWERcase all ASCII symbols
1537 lower() { awk '{ print tolower($0); fflush() }' "$@"; }
1538 
1539 # Live/Line-buffered RipGrep ensures results show/pipe up immediately
1540 lrg() { rg --line-buffered "$@"; }
1541 
1542 # Listen To Youtube
1543 lty() {
1544     local url
1545     # some youtube URIs end with extra playlist/tracker parameters
1546     url="$(echo "$1" | sed 's-&.*--')"
1547     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
1548 }
1549 
1550 # only keep lines which match any of the regexes given
1551 match() {
1552     awk '
1553         BEGIN {
1554             for (i = 1; i < ARGC; i++) {
1555                 e[i] = ARGV[i]
1556                 delete ARGV[i]
1557             }
1558         }
1559 
1560         {
1561             for (i = 1; i < ARGC; i++) {
1562                 if ($0 ~ e[i]) {
1563                     print; fflush()
1564                     got++
1565                     next
1566                 }
1567             }
1568         }
1569 
1570         END { exit(got == 0) }
1571     ' "${@:-[^\r]}"
1572 }
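     # a quick usage sketch, using a hypothetical `app.log` file: only keep
     # lines which match either of the 2 regexes given
     #   match '^ERROR' '^WARN' < app.log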
1573 
1574 # MAX Width truncates lines to the number of characters/bytes given, or to
1575 # 80 by default; output lines end with an ANSI reset-code, in case input
1576 # lines use ANSI styles
1577 maxw() {
1578     local maxwidth="${1:-80}"
1579     [ $# -gt 0 ] && shift
1580     awk -v maxw="${maxwidth}" '
1581         {
1582             gsub(/\r$/, "")
1583             printf("%s\x1b[0m\n", substr($0, 1, maxw)); fflush()
1584         }
1585     ' "$@"
1586 }
1587 
1588 # convert binary MegaBytes into bytes
1589 mb() {
1590     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1591         awk '/./ { printf "%.2f\n", 1048576 * $0; fflush() }' |
1592         sed 's-\.00*$--'
1593 }
1594 
1595 # Multi-Core MAKE runs `make` using all cores
1596 mcmake() { make -j "$(nproc)" "$@"; }
1597 
1598 # Multi-Core MaKe runs `make` using all cores
1599 mcmk() { make -j "$(nproc)" "$@"; }
1600 
1601 # merge stderr into stdout, without any ugly keyboard-dancing
1602 # merrge() { "$@" 2>&1; }
1603 
1604 # convert MIles into kilometers
1605 mi() {
1606     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1607         awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }'
1608 }
1609 
1610 # convert MIles² (squared) into kilometers²
1611 mi2() {
1612     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1613         awk '/./ { printf "%.2f\n", 2.5899881103360 * $0 }'
1614 }
1615 
1616 # Make In Folder
1617 mif() {
1618     local code
1619     pushd "${1:-.}" > /dev/null || return
1620     [ $# -gt 0 ] && shift
1621     make "$@"
1622     code=$?
1623     popd > /dev/null || return "${code}"
1624     return "${code}"
1625 }
1626 
1627 # Media INFO
1628 # minfo() { mediainfo "$@" | less -JMKiCRS; }
1629 
1630 # Media INFO
1631 # minfo() { ffprobe "$@" |& less -JMKiCRS; }
1632 
1633 # run `make`
1634 mk() { make "$@"; }
1635 
1636 # convert Miles Per Hour into kilometers per hour
1637 mph() {
1638     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1639         awk '/./ { printf "%.2f\n", 1.609344 * $0 }'
1640 }
1641 
1642 # Number all lines, using a tab right after each line number
1643 # n() {
1644 #     local start="${1:-1}"
1645 #     [ $# -gt 0 ] && shift
1646 #     nl -b a -w 1 -v "${start}" "$@"
1647 # }
1648 
1649 # Number all lines, using a tab right after each line number
1650 n() { stdbuf -oL nl -b a -w 1 -v 1 "$@"; }
1651 
1652 # NArrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1653 naman() {
1654     local w
1655     w="$(tput cols)"
1656     if [ "$w" -gt 120 ]; then
1657         w="$((w / 2 - 1))"
1658     fi
1659     MANWIDTH="$w" man "$@"
1660 }
1661 
1662 # Not AND sorts its 2 inputs, then finds lines not in common
1663 nand() {
1664     # comm -3 <(sort "$1") <(sort "$2")
1665     # dash doesn't support the process-sub syntax
1666     (sort "$1" | (sort "$2" | (comm -3 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
1667 }
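
# a commented usage sketch of `nand`, assuming 2 hypothetical files exist:
#   nand old-names.txt new-names.txt
# would emit the lines unique to either file, like `comm -3` on sorted inputs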
1668 
1669 # Nice Byte Count, using my scripts `nn` and `cext`
1670 nbc() { wc -c "$@" | nn --gray | cext; }
1671 
1672 # listen to streaming NEW WAVE music
1673 newwave() {
1674     printf "streaming \e[7mNew Wave radio\e[0m\n"
1675     mpv --quiet https://puma.streemlion.com:2910/stream
1676 }
1677 
# NIce(r) COlumns makes the output of commands whose first line is a header
# easier to read; uses my script `nn`
1680 nico() {
1681     awk '
1682         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1683         { printf "%5d  %s\n", NR - 1, $0; fflush() }
1684     ' "$@" | nn --gray | less -JMKiCRS
1685 }
1686 
1687 # emit nothing to output and/or discard everything from input
1688 nil() {
1689     if [ $# -gt 0 ]; then
1690         "$@" > /dev/null
1691     else
1692         cat < /dev/null
1693     fi
1694 }
1695 
1696 # pipe-run my scripts `nj` (Nice Json) and `nn` (Nice Numbers)
1697 njnn() { nj "$@" | nn --gray; }
1698 
1699 # Narrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1700 nman() {
1701     local w
1702     w="$(tput cols)"
1703     if [ "$w" -gt 120 ]; then
1704         w="$((w / 2 - 1))"
1705     fi
1706     MANWIDTH="$w" man "$@"
1707 }
1708 
1709 # convert Nautical MIles into kilometers
1710 nmi() {
1711     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1712         awk '/./ { printf "%.2f\n", 1.852 * $0; fflush() }'
1713 }
1714 
1715 # NO (standard) ERRor ignores stderr, without any ugly keyboard-dancing
1716 # noerr() { "$@" 2> /dev/null; }
1717 
1718 # play a white-noise sound lasting the number of seconds given, or for 1
1719 # second by default; uses my script `waveout`
1720 noise() { waveout "${1:-1}" "${2:-0.05} * random()" | mpv --really-quiet -; }
1721 
1722 # ignore trailing spaces, as well as trailing carriage returns
1723 notrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
1724 
1725 # show the current date and time
1726 now() { date +'%Y-%m-%d %H:%M:%S'; }
1727 
1728 # Nice Processes shows/lists all current processes; uses my script `nn`
1729 np() {
1730     local res
1731     local code
1732     # res="$(ps "${@:-auxf}")"
1733     res="$(ps "${@:-aux}")"
1734     code=$?
1735     if [ "${code}" -ne 0 ]; then
1736         return "${code}"
1737     fi
1738 
1739     echo "${res}" | awk '
1740         BEGIN {
1741             d = strftime("%a %b %d")
1742             t = strftime("%H:%M:%S")
1743             # printf "%s  %s\n\n", d, t
1744             # printf "\x1b[32m%s\x1b[0m  \x1b[34m%s\x1b[0m\n\n", d, t
1745             # printf "%30s\x1b[32m%s\x1b[0m  \x1b[34m%s\x1b[0m\n\n", "", d, t
1746             # printf "%30s%s  %s\n\n", "", d, t
1747             printf "\x1b[7m%30s%s  %s%30s\x1b[0m\n\n", "", d, t, ""
1748         }
1749 
1750         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1751 
1752         $1 == "root" {
1753             # gsub(/^/, "\x1b[36m")
1754             # gsub(/\x1b\[0m/, "\x1b[0m\x1b[36m")
1755             gsub(/^/, "\x1b[34m")
1756             gsub(/ +/, "&\x1b[0m\x1b[34m")
1757             gsub(/$/, "\x1b[0m")
1758         }
1759 
1760         {
1761             gsub(/ \? /, "\x1b[38;2;135;135;175m&\x1b[0m")
1762             gsub(/0[:\.]00*/, "\x1b[38;2;135;135;175m&\x1b[0m")
1763             printf "%3d  %s\n", NR - 1, $0
1764         }
1765     ' | nn --gray | less -JMKiCRS
1766 }
1767 
1768 # Nice Size, using my scripts `nn` and `cext`
1769 ns() { wc -c "$@" | nn --gray | cext; }
1770 
# Nice Transform Json, using my scripts `tj` and `nj`
1772 ntj() { tj "$@" | nj; }
1773 
1774 # Nice TimeStamp
1775 nts() {
1776     ts '%Y-%m-%d %H:%M:%S' |
1777         sed -u 's-^-\x1b[48;2;218;218;218m\x1b[38;2;0;95;153m-; s- -\x1b[0m\t-2'
1778 }
1779 
1780 # emit nothing to output and/or discard everything from input
1781 null() {
1782     if [ $# -gt 0 ]; then
1783         "$@" > /dev/null
1784     else
1785         cat < /dev/null
1786     fi
1787 }
1788 
1789 # NULl-terminate LINES ends each stdin line with a null byte, instead of a
1790 # line-feed byte
1791 nullines() {
1792     awk -v ORS='\000' '
1793         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1794         { gsub(/\r$/, ""); print; fflush() }
1795     ' "$@"
1796 }
1797 
1798 # (Nice) What Are These (?) shows what the names given to it are/do, coloring
1799 # the syntax of shell functions
1800 nwat() {
1801     local a
1802     local gap=0
1803 
1804     if [ $# -eq 0 ]; then
1805         printf "\e[38;2;204;0;0mnwat: no names given\e[0m\n" > /dev/stderr
1806         return 1
1807     fi
1808 
1809     local cmd="bat"
1810     # debian linux uses a different name for the `bat` app
1811     if [ -e "/usr/bin/batcat" ]; then
1812         cmd="batcat"
1813     fi
1814 
1815     for a in "$@"; do
1816         [ "${gap}" -gt 0 ] && printf "\n"
1817         gap=1
1818         # printf "\e[7m%-80s\e[0m\n" "$a"
1819         printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
1820 
1821         # resolve 1 alias level
1822         if alias "$a" 2> /dev/null > /dev/null; then
1823             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
1824         fi
1825 
1826         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
1827             # resolved aliases with args/spaces in them would otherwise fail
1828             echo "$a"
1829         elif whence -f "$a" > /dev/null 2> /dev/null; then
1830             # zsh seems to show a shell function's code only via `whence -f`
1831             whence -f "$a"
1832         elif type "$a" > /dev/null 2> /dev/null; then
1833             # dash doesn't support `declare`, and `type` in bash emits
1834             # a redundant first output line, when it's a shell function
1835             type "$a" | awk '
1836                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
1837                 { print; fflush() }
1838                 END { if (NR < 2 && skipped) print skipped }
1839             ' | "$cmd" -l sh --style=plain --theme='Monokai Extended Light' \
1840                 --wrap=never --color=always |
1841                     sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g'
1842         else
1843             printf "\e[38;2;204;0;0m%s not found\e[0m\n" "$a"
1844         fi
1845     done | less -JMKiCRS
1846 }
1847 
1848 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1849 # alternating styles to make long numbers easier to read
1850 # nwc() { wc "$@" | nn --gray; }
1851 
1852 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1853 # alternating styles to make long numbers easier to read
1854 # nwc() { wc "$@" | nn --gray | awk '{ printf "%5d %s\n", NR, $0; fflush() }'; }
1855 
1856 # Nice Word-Count runs `wc` and colors results, using my scripts `nn` and
1857 # `cext`, alternating styles to make long numbers easier to read
1858 nwc() {
1859     wc "$@" | sort -rn | nn --gray | cext |
1860         awk '{ printf "%5d %s\n", NR - 1, $0; fflush() }'
1861 }
1862 
1863 # Nice Weather Forecast
1864 nwf() {
1865     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
1866     curl --show-error -s telnet://graph.no:79 |
1867     sed -E \
1868         -e 's/ *\r?$//' \
1869         -e '/^\[/d' \
1870         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
1871         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
1872         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
1873         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
1874         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
1875         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
1876         -e 's/\*/○/g' |
1877     awk 1 |
1878     less -JMKiCRS
1879 }
1880 
1881 # Nice Zoom Json, using my scripts `zj`, and `nj`
1882 nzj() { zj "$@" | nj; }
1883 
1884 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1885 # pawk() { awk -F='' -v RS='' "$@"; }
1886 
1887 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1888 pawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
1889 
1890 # Plain `fd`
1891 pfd() { fd --color=never "$@"; }
1892 
1893 # pick lines, using all the 1-based line-numbers given
1894 picklines() {
1895     awk '
1896         BEGIN { m = ARGC - 1; if (ARGC == 1) exit 0 }
1897         BEGIN { for (i = 1; i <= m; i++) { p[i] = ARGV[i]; delete ARGV[i] } }
1898         { l[++n] = $0 }
1899         END {
1900             for (i = 1; i <= m; i++) {
1901                 j = p[i]
1902                 if (j < 0) j += NR + 1
1903                 if (0 < j && j <= NR) print l[j]
1904             }
1905         }
1906     ' "$@"
1907 }
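
# a commented example of `picklines`, using made-up input: negative numbers
# count lines backward from the last one
#   seq 5 | picklines 2 -1
# would emit lines 2 and 5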
1908 
1909 # Plain Interactive Grep
1910 pig() { ugrep --color=never -Q -E "$@"; }
1911 
1912 # make text plain, by ignoring ANSI terminal styling
1913 plain() {
1914     awk '
1915         {
1916             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
1917             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
1918             print; fflush()
1919         }
1920     ' "$@"
1921 }
1922 
1923 # end all lines with an ANSI-code to reset styles
1924 plainend() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1925 
1926 # end all lines with an ANSI-code to reset styles
1927 plainends() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1928 
1929 # play audio/video media
1930 # play() { mplayer -msglevel all=-1 "${@:--}"; }
1931 
1932 # play audio/video media
1933 play() { mpv "${@:--}"; }
1934 
1935 # Pick LINE, using the 1-based line-number given
1936 pline() {
1937     local line="$1"
1938     [ $# -gt 0 ] && shift
1939     awk -v n="${line}" '
1940         BEGIN { if (n < 1) exit 0 }
1941         NR == n { print; exit 0 }
1942     ' "$@"
1943 }
1944 
1945 # Paused MPV; especially useful when trying to view pictures via `mpv`
1946 pmpv() { mpv --pause "${@:--}"; }
1947 
1948 # Print Python result
1949 pp() { python -c "print($1)"; }
1950 
1951 # PRecede (input) ECHO, prepends a first line to stdin lines
1952 precho() { echo "$@" && cat /dev/stdin; }
1953 
1954 # PREcede (input) MEMO, prepends a first highlighted line to stdin lines
1955 prememo() {
1956     awk '
1957         BEGIN {
1958             if (ARGC > 1) printf "\x1b[7m"
1959             for (i = 1; i < ARGC; i++) {
1960                 if (i > 1) printf " "
1961                 printf "%s", ARGV[i]
1962                 delete ARGV[i]
1963             }
1964             if (ARGC > 1) printf "\x1b[0m\n"
1965             fflush()
1966         }
1967         { print; fflush() }
1968     ' "$@"
1969 }
1970 
1971 # start by joining all arguments given as a tab-separated-items line of output,
1972 # followed by all lines from stdin verbatim
1973 pretsv() {
1974     awk '
1975         BEGIN {
1976             for (i = 1; i < ARGC; i++) {
1977                 if (i > 1) printf "\t"
1978                 printf "%s", ARGV[i]
1979                 delete ARGV[i]
1980             }
1981             if (ARGC > 1) printf "\n"
1982             fflush()
1983         }
1984         { print; fflush() }
1985     ' "$@"
1986 }
1987 
1988 # Plain Recursive Interactive Grep
1989 prig() { ugrep --color=never -r -Q -E "$@"; }
1990 
1991 # show/list all current processes
1992 processes() {
1993     local res
1994     res="$(ps aux)"
1995     echo "${res}" | awk '!/ps aux$/' | sed -E \
1996         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' \
1997         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1'
1998 }
1999 
2000 # Play Youtube Audio
2001 pya() {
2002     local url
2003     # some youtube URIs end with extra playlist/tracker parameters
2004     url="$(echo "$1" | sed 's-&.*--')"
2005     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
2006 }
2007 
2008 # Quiet ignores stderr, without any ugly keyboard-dancing
2009 q() { "$@" 2> /dev/null; }
2010 
2011 # Quiet MPV
2012 qmpv() { mpv --quiet "${@:--}"; }
2013 
2014 # ignore stderr, without any ugly keyboard-dancing
2015 quiet() { "$@" 2> /dev/null; }
2016 
2017 # Reset the screen, which empties it and resets the current style
2018 r() { reset; }
2019 
2020 # keep only lines between the 2 line numbers given, inclusively
2021 rangelines() {
2022     { [ "$#" -eq 2 ] || [ "$#" -eq 3 ]; } && [ "${1}" -le "${2}" ] &&
2023         { tail -n +"${1:-1}" "${3:--}" | head -n "$(("${2}" - "${1}" + 1))"; }
2024 }
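
# a commented example of `rangelines`, using made-up input:
#   seq 10 | rangelines 3 5
# would emit lines 3, 4, and 5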
2025 
2026 # RANdom MANual page
2027 ranman() {
2028     find "/usr/share/man/man${1:-1}" -type f | shuf -n 1 | xargs basename |
2029         sed 's-\.gz$--' | xargs man
2030 }
2031 
2032 # Run AWK expression
2033 rawk() {
2034     local expr="${1:-0}"
2035     [ $# -gt 0 ] && shift
2036     awk "BEGIN { print ${expr}; exit }" "$@"
2037 }
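
# a few commented examples of `rawk`, which evaluates an awk expression and
# prints its result:
#   rawk '2 ^ 10'     # would emit 1024
#   rawk 'sqrt(2)'    # would emit 1.41421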
2038 
2039 # play a ready-phone-line sound lasting the number of seconds given, or for 1
2040 # second by default; uses my script `waveout`
2041 ready() {
2042     local f='0.5 * sin(350*tau*t) + 0.5 * sin(450*tau*t)'
2043     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2044 }
2045 
2046 # reflow/trim lines of prose (text) to improve its legibility: it's especially
2047 # useful when the text is pasted from web-pages being viewed in reader mode
2048 reprose() {
2049     local w="${1:-80}"
2050     [ $# -gt 0 ] && shift
2051     awk '
2052         FNR == 1 && NR > 1 { print "" }
2053         { gsub(/\r$/, ""); print; fflush() }
2054     ' "$@" | fold -s -w "$w" | sed -u -E 's- *\r?$--'
2055 }
2056 
2057 # ignore ansi styles from stdin and restyle things using the style-name given;
2058 # uses my script `style`
2059 restyle() { style "$@"; }
2060 
2061 # change the tab-title on your terminal app
2062 retitle() { printf "\e]0;%s\a\n" "$*"; }
2063 
2064 # REVerse-order SIZE (byte-count)
2065 revsize() { wc -c "$@" | sort -rn; }
2066 
2067 # Run In Folder
2068 rif() {
2069     local code
2070     pushd "${1:-.}" > /dev/null || return
2071     [ $# -gt 0 ] && shift
2072     "$@"
2073     code=$?
2074     popd > /dev/null || return "${code}"
2075     return "${code}"
2076 }
2077 
2078 # play a ringtone-style sound lasting the number of seconds given, or for 1
2079 # second by default; uses my script `waveout`
2080 ringtone() {
2081     local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))'
2082     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2083 }
2084 
2085 # Read-Only Editor
2086 roe() { micro -readonly true "$@"; }
2087 
2088 # Read-Only Micro (text editor)
2089 rom() { micro -readonly true "$@"; }
2090 
2091 # run the command given, trying to turn its output into TSV (tab-separated
2092 # values); uses my script `dejson`
2093 rtab() { jc "$@" | dejson; }
2094 
2095 # Right TRIM ignores trailing spaces, as well as trailing carriage returns
2096 rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2097 
2098 # show a RULER-like width-measuring line
2099 # ruler() {
2100 #     local n="${1:-$(tput cols)}"
2101 #     [ "${n}" -gt 0 ] && printf "%${n}s\n" "" |
2102 #         sed -E 's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2103 # }
2104 
2105 # show a RULER-like width-measuring line
2106 ruler() {
2107     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \
2108         's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2109 }
2110 
2111 # run the command given, trying to turn its output into TSV (tab-separated
2112 # values); uses my script `dejson`
2113 runtab() { jc "$@" | dejson; }
2114 
2115 # run the command given, trying to turn its output into TSV (tab-separated
2116 # values); uses my script `dejson`
2117 runtsv() { jc "$@" | dejson; }
2118 
2119 # Reverse-order WC
2120 rwc() { wc "$@" | sort -rn; }
2121 
2122 # extended-mode Sed, enabling its full regex syntax
2123 # s() { sed -E -u "$@"; }
2124 
2125 # Substitute using `sed`, enabling its full regex syntax
2126 s() { sed -E -u "$(printf "s\xff$1\xff$2\xffg")"; }
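
# a commented example of `s`, whose unusual delimiter lets patterns use `/`
# freely; the sample text is made up:
#   echo 'path/to/file' | s '/' ' > '
# would emit `path > to > file`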
2127 
2128 # Silent CURL spares you the progress bar, but still tells you about errors
2129 scurl() { curl --show-error -s "$@"; }
2130 
2131 # show a unique-looking SEParator line; useful to run between commands
2132 # which output walls of text
2133 sep() {
2134     [ "${1:-80}" -gt 0 ] &&
2135         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g'
2136 }
2137 
2138 # webSERVE files in a folder as localhost, using the port number given, or
2139 # port 8080 by default
2140 serve() {
2141     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
2142     python3 -m http.server "${1:-8080}" -d "${2:-.}"
2143 }
2144 
2145 # SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input
2146 setdiff() {
2147     # comm -23 <(sort "$1") <(sort "$2")
2148     # dash doesn't support the process-sub syntax
2149     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2150 }
2151 
2152 # SET INtersection, sorts its 2 inputs, then finds common lines
2153 setin() {
2154     # comm -12 <(sort "$1") <(sort "$2")
2155     # dash doesn't support the process-sub syntax
2156     (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2157 }
2158 
2159 # SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input
2160 setsub() {
2161     # comm -23 <(sort "$1") <(sort "$2")
2162     # dash doesn't support the process-sub syntax
2163     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2164 }
2165 
2166 # Show Files (and folders), coloring folders and links; uses my script `nn`
2167 sf() {
2168     ls -al --file-type --color=never --time-style iso "$@" | awk '
2169         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2170         {
2171             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2172             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2173             printf "%6d  %s\n", NR - 1, $0; fflush()
2174         }
2175     ' | nn --gray | less -JMKiCRS
2176 }
2177 
2178 # Show Files (and folders) Plus, by coloring folders, links, and extensions;
2179 # uses my scripts `nn` and `cext`
2180 sfp() {
2181     ls -al --file-type --color=never --time-style iso "$@" | awk '
2182         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2183         {
2184             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2185             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2186             printf "%6d  %s\n", NR - 1, $0; fflush()
2187         }
2188     ' | nn --gray | cext | less -JMKiCRS
2189 }
2190 
2191 # Show File Sizes, using my scripts `nn` and `cext`
2192 sfs() {
2193     # turn arg-list into single-item lines
2194     printf "%s\x00" "$@" |
2195     # calculate file-sizes, and reverse-sort results
2196     xargs -0 wc -c | sort -rn |
2197     # add/realign fields to improve legibility
2198     awk '
2199         # start output with a header-like line, and add a MiB field
2200         BEGIN { printf "%6s  %10s  %8s  name\n", "n", "bytes", "MiB"; fflush() }
2201         # make table breathe with empty lines, so tall outputs are readable
2202         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2203         # emit regular output lines
2204         {
2205             printf "%6d  %10d  %8.2f  ", NR - 1, $1, $1 / 1048576
2206             # first field is likely space-padded
2207             gsub(/^ */, "")
2208             # slice line after the first field, as filepaths can have spaces
2209             $0 = substr($0, length($1) + 1)
            # drop the space separating the byte-count from the filepath
2211             gsub(/^ /, "")
2212             printf "%s\n", $0; fflush()
2213         }
2214     ' |
2215     # make zeros in the MiB field stand out with a special color
2216     awk '
2217         {
2218             gsub(/ 00*\.00* /, "\x1b[38;2;135;135;175m&\x1b[0m")
2219             print; fflush()
2220         }
2221     ' |
2222     # make numbers nice, alternating styles along 3-digit groups
2223     nn --gray |
2224     # color-code file extensions
2225     cext |
2226     # make result interactively browsable
2227     less -JMKiCRS
2228 }
2229 
2230 # SHell-run AWK output
2231 # shawk() { stdbuf -oL awk "$@" | sh; }
2232 
# time/benchmark the various commands given one-per-line from stdin, appending
# to each of them the common extra arguments given explicitly; uses `hyperfine`
2235 showdown() {
2236     awk '
2237         BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } }
2238         {
2239             printf "%s", $0
2240             for (i = 1; i < ARGC; i++) printf " %s", a[i]
2241             printf "\x00"; fflush()
2242         }
2243     ' "$@" | xargs -0 hyperfine --style full
2244 }
2245 
2246 # SHOW a command, then RUN it
2247 showrun() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2248 
2249 # SHell-QUOTE each line from the input(s): this is useful to make lines of
# single-filepaths compatible with `xargs`, whose default input-parsing would
# otherwise get in the way of filepaths with spaces and other special symbols
2252 shquote() {
2253     awk '
2254         {
2255             s = $0
2256             gsub(/\r$/, "", s)
2257             gsub(/\\/, "\\\\", s)
2258             gsub(/"/, "\\\"", s)
2259             gsub(/`/, "\\`", s)
2260             gsub(/\$/, "\\$", s)
2261             printf "\"%s\"\n", s; fflush()
2262         }
2263     ' "$@"
2264 }
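
# a commented example of `shquote`, with a made-up filename containing a space:
#   printf 'some file.txt\n' | shquote
# would emit `"some file.txt"`, which `xargs` then treats as a single argument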
2265 
2266 # clean the screen, after running the command given
2267 # sideshow() { tput smcup; "$@"; tput rmcup; }
2268 
2269 # skip the first n lines, or the 1st line by default
2270 skip() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2271 
2272 # skip the first n bytes
2273 skipbytes() { tail -c +$(("$1" + 1)) "${2:--}"; }
2274 
2275 # skip the last n lines, or the last line by default
2276 skiplast() { head -n -"${1:-1}" "${2:--}"; }
2277 
2278 # skip the last n bytes
2279 skiplastbytes() { head -c -"$1" "${2:--}"; }
2280 
2281 # skip the last n lines, or the last line by default
2282 skiplastlines() { head -n -"${1:-1}" "${2:--}"; }
2283 
2284 # skip the first n lines, or the 1st line by default
2285 skiplines() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2286 
2287 # SLOW/delay lines from the standard-input, waiting the number of seconds
2288 # given for each line, or waiting 1 second by default
2289 slow() {
2290     local seconds="${1:-1}"
2291     (
2292         IFS="$(printf "\n")"
2293         while read -r line; do
2294             sleep "${seconds}"
2295             printf "%s\n" "${line}"
2296         done
2297     )
2298 }
2299 
2300 # Show Latest Podcasts, using my scripts `podfeed` and `si`
2301 slp() {
2302     local title
2303     title="Latest Podcast Episodes as of $(date +'%F %T')"
2304     podfeed -title "${title}" "$@" | si
2305 }
2306 
2307 # recursively find all files with fewer bytes than the number given
2308 smallfiles() {
2309     local n
2310     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
2311     [ $# -gt 0 ] && shift
2312 
2313     local arg
2314     for arg in "${@:-.}"; do
2315         if [ ! -d "${arg}" ]; then
2316             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2317             return 1
2318         fi
2319         stdbuf -oL find "${arg}" -type f -size -"$n"c
2320     done
2321 }
2322 
2323 # emit the first line as is, sorting all lines after that, using the
2324 # `sort` command, passing all/any arguments/options to it
2325 sortrest() {
2326     awk -v sort="sort $*" '
2327         { gsub(/\r$/, "") }
2328         NR == 1 { print; fflush() }
2329         NR > 1 { print | sort }
2330     '
2331 }
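
# a commented example of `sortrest`, using made-up input with a header line:
#   printf 'name\nzoe\nann\n' | sortrest
# would keep `name` first, then emit `ann` and `zoe`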
2332 
2333 # SORt Tab-Separated Values: emit the first line as is, sorting all lines after
2334 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2335 # all/any arguments/options to it
2336 sortsv() {
2337     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2338         { gsub(/\r$/, "") }
2339         NR == 1 { print; fflush() }
2340         NR > 1 { print | sort }
2341     '
2342 }
2343 
2344 # emit a line with the number of spaces given in it
2345 spaces() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" ""; }
2346 
2347 # ignore leading spaces, trailing spaces, even runs of multiple spaces
2348 # in the middle of lines, as well as trailing carriage returns
2349 squeeze() {
2350     awk '
2351         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2352         {
2353             gsub(/^ +| *\r?$/, "")
2354             gsub(/ *\t */, "\t")
2355             gsub(/  +/, " ")
2356             print; fflush()
2357         }
2358     ' "$@"
2359 }
2360 
2361 # SQUeeze and stOMP, by ignoring leading spaces, trailing spaces, even runs
2362 # of multiple spaces in the middle of lines, as well as trailing carriage
2363 # returns, while also turning runs of empty lines into single empty lines,
2364 # and ignoring leading/trailing empty lines, effectively also `squeezing`
2365 # lines vertically
2366 squomp() {
2367     awk '
2368         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2369         /^\r?$/ { empty = 1; next }
2370         empty { if (n > 0) print ""; empty = 0 }
2371         {
2372             gsub(/^ +| *\r?$/, "")
2373             gsub(/ *\t */, "\t")
2374             gsub(/  +/, " ")
2375             print; fflush()
2376             n++
2377         }
2378     ' "$@"
2379 }
2380 
2381 # Show a command, then Run it
2382 sr() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2383 
2384 # turn runs of empty lines into single empty lines, effectively squeezing
2385 # paragraphs vertically, so to speak; runs of empty lines both at the start
2386 # and at the end are ignored
2387 stomp() {
2388     awk '
2389         /^\r?$/ { empty = 1; next }
2390         empty { if (n > 0) print ""; empty = 0 }
2391         { print; fflush(); n++ }
2392     ' "$@"
2393 }
2394 
2395 # STRike-thru (lines) with AWK
2396 strawk() {
2397     local cond="${1:-1}"
2398     [ $# -gt 0 ] && shift
2399     awk '
2400         { low = lower = tolower($0) }
2401         '"${cond}"' {
2402             gsub(/\x1b\[0m/, "\x1b[0m\x1b[9m")
2403             printf "\x1b[9m%s\x1b[0m\n", $0; fflush()
2404             next
2405         }
2406         { print; fflush() }
2407     ' "$@"
2408 }
2409 
2410 # Sort Tab-Separated Values: emit the first line as is, sorting all lines after
2411 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2412 # all/any arguments/options to it
2413 stsv() {
2414     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2415         { gsub(/\r$/, "") }
2416         NR == 1 { print; fflush() }
2417         NR > 1 { print | sort }
2418     '
2419 }
2420 
2421 # use the result of the `awk` function `substr` for each line
2422 substr() {
2423     local start="${1:-1}"
2424     local length="${2:-80}"
2425     [ $# -gt 0 ] && shift
2426     [ $# -gt 0 ] && shift
2427     awk -v start="${start}" -v len="${length}" \
2428         '{ printf "%s\n", substr($0, start, len); fflush() }' "$@"
2429 }
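
# a commented example of the `substr` wrapper, using made-up input:
#   printf 'abcdefgh\n' | substr 3 4
# would emit `cdef`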
2430 
2431 # turn SUDo privileges OFF right away: arguments also cause `sudo` to run with
2432 # what's given, before relinquishing existing privileges
2433 # sudoff() {
2434 #     local code=0
2435 #     if [ $# -gt 0 ]; then
2436 #         sudo "$@"
2437 #         code=$?
2438 #     fi
2439 #     sudo -k
2440 #     return "${code}"
2441 # }
2442 
2443 # append a final Tab-Separated-Values line with the sums of all columns from
2444 # the input table(s) given; items from first lines aren't counted/added
2445 sumtsv() {
2446     awk -F "\t" '
2447         {
2448             print; fflush()
2449             if (width < NF) width = NF
2450         }
2451 
2452         FNR > 1 { for (i = 1; i <= NF; i++) sums[i] += $i + 0 }
2453 
2454         END {
2455             for (i = 1; i <= width; i++) {
2456                 if (i > 1) printf "\t"
2457                 printf "%s", sums[i] ""
2458             }
2459             if (width > 0) printf "\n"
2460         }
2461     ' "$@"
2462 }
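
# a commented example of `sumtsv`, using a made-up 2-column table whose first
# line is a header:
#   printf 'n\tcost\n1\t2.5\n2\t4\n' | sumtsv
# would echo the 3 input lines, then append a line with 3 and 6.5 tab-separated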
2463 
2464 # show a random command defined in `clam`, using `wat` from `clam` itself
2465 surprise() {
2466     wat "$(grep -E '^[a-z]+\(' "$(which clam)" | shuf -n 1 | sed -E 's-\(.*--')"
2467 }
2468 
2469 # Time the command given
2470 t() { time "$@"; }
2471 
2472 # show a reverse-sorted tally of all lines read, where ties are sorted
2473 # alphabetically
2474 tally() {
2475     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
2476         # reassure users by instantly showing the header
2477         BEGIN { print "value\ttally"; fflush() }
2478         { gsub(/\r$/, ""); t[$0]++ }
2479         END { for (k in t) { printf("%s\t%d\n", k, t[k]) | sort } }
2480     ' "$@"
2481 }
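
# a commented example of `tally`, using made-up input lines:
#   printf 'b\na\nb\n' | tally
# would emit the header, then `b` with 2 and `a` with 1, as tab-separated pairs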
2482 
2483 # Tab AWK: TSV-specific I/O settings for `awk`
2484 # tawk() { awk -F "\t" -v OFS="\t" "$@"; }
2485 
2486 # Tab AWK: TSV-specific I/O settings for `awk`
2487 tawk() { stdbuf -oL awk -F "\t" -v OFS="\t" "$@"; }
2488 
2489 # quick alias for my script `tbp`
2490 tb() { tbp "$@"; }
2491 
2492 # Titled conCATenate Lines highlights each filename, before emitting its
2493 # lines
2494 tcatl() {
2495     awk '
2496         FNR == 1 { printf "\x1b[7m%s\x1b[0m\n", FILENAME; fflush() }
2497         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2498         { gsub(/\r$/, ""); print; fflush() }
2499     ' "$@"
2500 }
2501 
2502 # Title ECHO changes the tab-title on your terminal app
2503 techo() { printf "\e]0;%s\a\n" "$*"; }
2504 
2505 # simulate the cadence of old-fashioned teletype machines, by slowing down
2506 # the output of ASCII/UTF-8 symbols from the standard-input
2507 # teletype() {
2508 #     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | (
2509 #         IFS="$(printf "\n")"
2510 #         while read -r line; do
2511 #             echo "${line}" | sed -E 's-(.)-\1\n-g' |
2512 #                 while read -r item; do
2513 #                     sleep 0.015
2514 #                     printf "%s" "${item}"
2515 #                 done
2516 #             sleep 0.75
2517 #             printf "\n"
2518 #         done
2519 #     )
2520 # }
2521 
2522 # simulate the cadence of old-fashioned teletype machines, by slowing down
2523 # the output of ASCII/UTF-8 symbols from the standard-input
2524 teletype() {
2525     awk '
2526         {
2527             gsub(/\r$/, "")
2528 
2529             n = length($0)
2530             for (i = 1; i <= n; i++) {
2531                 if (code = system("sleep 0.015")) exit code
2532                 printf "%s", substr($0, i, 1); fflush()
2533             }
2534             if (code = system("sleep 0.75")) exit code
2535             printf "\n"; fflush()
2536         }
2537     ' "$@"
2538 }
2539 
2540 # run `top` without showing any of its output after quitting it
2541 tip() { tput smcup; top "$@"; tput rmcup; }
2542 
2543 # change the tab-title on your terminal app
2544 title() { printf "\e]0;%s\a\n" "$*"; }
2545 
2546 # quick alias for my script `tjp`
2547 tj() { tjp "$@"; }
2548 
2549 # quick alias for my script `tlp`
2550 tl() { tlp "$@"; }
2551 
# show the current date in a specific format
2553 today() { date +'%Y-%m-%d %a %b %d'; }
2554 
2555 # get the first n lines, or 1 by default
2556 toline() { head -n "${1:-1}" "${2:--}"; }
2557 
2558 # lowercase all ASCII symbols
2559 tolower() { awk '{ print tolower($0); fflush() }' "$@"; }
2560 
2561 # play a tone/sine-wave sound lasting the number of seconds given, or for 1
2562 # second by default: after the optional duration, the next optional arguments
2563 # are the volume and the tone-frequency; uses my script `waveout`
2564 tone() {
2565     waveout "${1:-1}" "${2:-1} * sin(${3:-440} * 2 * pi * t)" |
2566         mpv --really-quiet -
2567 }
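
# a commented example of `tone`, which assumes both `waveout` and `mpv` are
# available:
#   tone 2 0.5 880
# would play an 880Hz tone for 2 seconds at half volume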
2568 
2569 # get the processes currently using the most cpu
2570 topcpu() {
2571     local n="${1:-10}"
2572     [ "$n" -gt 0 ] && ps aux | awk '
2573         NR == 1 { print; fflush() }
2574         NR > 1 { print | "sort -rnk3" }
2575     ' | head -n "$(("$n" + 1))"
2576 }
2577 
2578 # show all files directly in the folder given, without looking any deeper
2579 topfiles() {
2580     local arg
2581     for arg in "${@:-.}"; do
2582         if [ ! -d "${arg}" ]; then
2583             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2584             return 1
2585         fi
2586         stdbuf -oL find "${arg}" -maxdepth 1 -type f
2587     done
2588 }
2589 
2590 # show all folders directly in the folder given, without looking any deeper
2591 topfolders() {
2592     local arg
2593     for arg in "${@:-.}"; do
2594         if [ ! -d "${arg}" ]; then
2595             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2596             return 1
2597         fi
2598         stdbuf -oL find "${arg}" -maxdepth 1 -type d |
2599             awk '!/^\.$/ { print; fflush() }'
2600     done
2601 }
2602 
2603 # get the processes currently using the most memory
2604 topmemory() {
2605     local n="${1:-10}"
2606     [ "$n" -gt 0 ] && ps aux | awk '
2607         NR == 1 { print; fflush() }
2608         NR > 1 { print | "sort -rnk6" }
2609     ' | head -n "$(("$n" + 1))"
2610 }
2611 
2612 # transpose (switch) rows and columns from tables
2613 transpose() {
2614     awk '
2615         { gsub(/\r$/, "") }
2616 
2617         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
2618 
2619         {
2620             for (i = 1; i <= NF; i++) lines[i][NR] = $i
2621             if (maxitems < NF) maxitems = NF
2622         }
2623 
2624         END {
2625             for (j = 1; j <= maxitems; j++) {
2626                 for (i = 1; i <= NR; i++) {
2627                     if (i > 1) printf "\t"
2628                     printf "%s", lines[j][i]
2629                 }
2630                 printf "\n"
2631             }
2632         }
2633     ' "$@"
2634 }
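
# a commented example of `transpose`, using a made-up 2-row tab-separated table:
#   printf '1\t2\t3\n4\t5\t6\n' | transpose
# would emit 3 lines: 1 with 4, 2 with 5, and 3 with 6, each pair tab-separated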
2635 
2636 # ignore leading/trailing spaces, as well as trailing carriage returns
2637 trim() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2638 
2639 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2640 # decimal dots themselves, when decimals in a number are all zeros; works
2641 # on gawk and busybox awk, but not on mawk, as the latter lacks `gensub`
2642 # trimdecs() {
2643 #     awk '
2644 #         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2645 #         {
2646 #             gsub(/\r$/, "")
2647 #             $0 = gensub(/([0-9]+)\.0+/, "\\1", "g")
2648 #             $0 = gensub(/([0-9]+\.[0-9]*[1-9]+)0+/, "\\1", "g")
2649 #             print; fflush()
2650 #         }
2651 #     ' "$@"
2652 # }
2653 
2654 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2655 # decimal dots themselves, when decimals in a number are all zeros
2656 trimdecs() {
2657     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" |
2658         sed -u -E 's-([0-9]+)\.0+-\1-g; s-([0-9]+\.[0-9]*[1-9]+)0+-\1-g'
2659 }
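
# a commented example of `trimdecs`, using made-up numbers:
#   printf '2.500 3.000 7.25\n' | trimdecs
# would emit `2.5 3 7.25`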
2660 
2661 # ignore trailing spaces, as well as trailing carriage returns
2662 trimend() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2663 
2664 # ignore trailing spaces, as well as trailing carriage returns
2665 trimends() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2666 
2667 # ignore leading/trailing spaces, as well as trailing carriage returns
2668 trimlines() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2669 
2670 # ignore leading/trailing spaces, as well as trailing carriage returns
2671 trimsides() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2672 
2673 # ignore trailing spaces, as well as trailing carriage returns
2674 trimtrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2675 
2676 # ignore trailing spaces, as well as trailing carriage returns
2677 trimtrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2678 
2679 # try running a command, emitting an explicit message to standard-error
2680 # if the command given fails
2681 try() {
2682     "$@" || {
2683         printf "\n\e[31m%s \e[41m\e[97m failed \e[0m\n" "$*" >&2
2684         return 255
2685     }
2686 }
2687 
2688 # Transform Strings with Python; uses my script `tbp`
2689 tsp() { tbp -s "$@"; }
2690 
2691 # run the command given, trying to turn its output into TSV (tab-separated
2692 # values); uses my script `dejson`
2693 tsvrun() { jc "$@" | dejson; }
2694 
2695 # Underline (lines) with AWK
2696 uawk() {
2697     local cond="${1:-1}"
2698     [ $# -gt 0 ] && shift
2699     awk '
2700         { low = lower = tolower($0) }
2701         '"${cond}"' {
2702             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2703             printf "\x1b[4m%s\x1b[0m\n", $0; fflush()
2704             next
2705         }
2706         { print; fflush() }
2707     ' "$@"
2708 }
2709 
2710 # Underline Every few lines: make groups of 5 lines (by default) stand out by
2711 # underlining the last line of each
2712 ue() {
2713     local n="${1:-5}"
2714     [ $# -gt 0 ] && shift
2715     awk -v n="$n" '
2716         BEGIN { if (n == 0) n = -1 }
2717         NR % n == 0 && NR != 1 {
2718             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2719             printf("\x1b[4m%s\x1b[0m\n", $0); fflush()
2720             next
2721         }
2722         { print; fflush() }
2723     ' "$@"
2724 }
2725 
2726 # deduplicate lines, keeping them in their original order
2727 unique() { awk '!c[$0]++ { print; fflush() }' "$@"; }
2728 
2729 # concatenate all named input sources unix-style: all trailing CRLFs become
2730 # single LFs, each non-empty input will always end in a LF, so lines from
# different sources aren't accidentally joined; also leading UTF-8 BOMs on the
2732 # first line of each input are ignored, as those are useless at best
2733 unixify() {
2734     awk '
2735         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2736         { gsub(/\r$/, ""); print; fflush() }
2737     ' "$@"
2738 }
2739 
2740 # go UP n folders, or go up 1 folder by default
2741 up() {
2742     if [ "${1:-1}" -le 0 ]; then
2743         cd .
2744         return $?
2745     fi
2746 
2747     cd "$(printf "%${1:-1}s" "" | sed 's- -../-g')" || return $?
2748 }
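
# a commented example of `up`:
#   up 2
# would change the current folder to ../../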
2749 
2750 # convert United States Dollars into CAnadian Dollars, using the latest
2751 # official exchange rates from the bank of canada; during weekends, the
2752 # latest rate may be from a few days ago; the default amount of usd to
2753 # convert is 1, when not given
2754 usd2cad() {
2755     local site='https://www.bankofcanada.ca/valet/observations/group'
2756     local csv_rates="${site}/FX_RATES_DAILY/csv"
2757     local url
2758     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
2759     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
2760         /USD/ { for (i = 1; i <= NF; i++) if($i ~ /USD/) j = i }
2761         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
2762 }
2763 
2764 # View with `less`
2765 v() { less -JMKiCRS "$@"; }
2766 
2767 # run a command, showing its success/failure right after
2768 verdict() {
2769     local code
2770     "$@"
2771     code=$?
2772 
2773     if [ "${code}" -eq 0 ]; then
2774         printf "\n\e[38;2;0;135;95m%s \e[48;2;0;135;95m\e[38;2;255;255;255m succeeded \e[0m\n" "$*" >&2
2775     else
2776         printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" >&2
2777     fi
2778     return "${code}"
2779 }
2780 
2781 # run `cppcheck` with even stricter options
2782 vetc() { cppcheck --enable=portability --enable=style "$@"; }
2783 
2784 # run `cppcheck` with even stricter options
2785 vetcpp() { cppcheck --enable=portability --enable=style "$@"; }
2786 
2787 # check shell scripts for common gotchas, avoiding complaints about using
2788 # the `local` keyword, which is widely supported in practice
2789 vetshell() { shellcheck -e 3043 "$@"; }
2790 
2791 # View with Header runs `less` without line numbers, with ANSI styles, no
2792 # line-wraps, and using the first n lines as a sticky-header (1 by default),
2793 # so they always show on top
2794 vh() {
2795     local n="${1:-1}"
2796     [ $# -gt 0 ] && shift
2797     less --header="$n" -JMKiCRS "$@"
2798 }
2799 
2800 # VIEW the result of showing a command, then RUNning it, using `less`
2801 viewrun() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2802 
2803 # View Nice Columns; uses my scripts `realign` and `nn`
2804 vnc() { realign "$@" | nn --gray | less -JMKiCRS; }
2805 
2806 # View Nice Hexadecimals; uses my script `nh`
2807 vnh() { nh "$@" | less -JMKiCRS; }
2808 
2809 # View Nice Json / Very Nice Json; uses my scripts `nj` and `nn`
2810 vnj() { nj "$@" | less -JMKiCRS; }
2811 
2812 # View Very Nice Json with Nice Numbers; uses my scripts `nj` and `nn`
2813 vnjnn() { nj "$@" | nn --gray | less -JMKiCRS; }
2814 
2815 # View Nice Numbers; uses my script `nn`
2816 vnn() { nn "${@:---gray}" | less -JMKiCRS; }
2817 
2818 # View Nice Table / Very Nice Table; uses my scripts `nt` and `nn`
2819 vnt() {
2820     awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" |
2821         nt | nn --gray |
2822         awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' |
2823         less -JMKiCRS #--header=1
2824 }
2825 
2826 # View-Run using `less`: show a command, then run it
2827 # vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less --header=1 -JMKiCRS; }
2828 
2829 # View-Run using `less`: show a command, then run it
2830 vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2831 
2832 # View Text with `less`
2833 # vt() { less -JMKiCRS "$@"; }
2834 
2835 # View Text with the `micro` text-editor in read-only mode
2836 vt() { micro -readonly true "$@"; }
2837 
2838 # What are these (?); uses my command `nwat`
2839 # w() { nwat "$@"; }
2840 
2841 # What Are These (?) shows what the names given to it are/do
2842 wat() {
2843     local a
2844     local gap=0
2845 
2846     if [ $# -eq 0 ]; then
2847         printf "\e[31mwat: no names given\e[0m\n" > /dev/stderr
2848         return 1
2849     fi
2850 
2851     for a in "$@"; do
2852         [ "${gap}" -gt 0 ] && printf "\n"
2853         gap=1
2854         # printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
2855         printf "\e[7m%-80s\e[0m\n" "$a"
2856 
2857         # resolve 1 alias level
2858         if alias "$a" 2> /dev/null > /dev/null; then
2859             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
2860         fi
2861 
2862         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
2863             # resolved aliases with args/spaces in them would otherwise fail
2864             echo "$a"
2865         elif whence -f "$a" > /dev/null 2> /dev/null; then
2866             # zsh seems to show a shell function's code only via `whence -f`
2867             whence -f "$a"
2868         elif type "$a" > /dev/null 2> /dev/null; then
2869             # dash doesn't support `declare`, and `type` in bash emits
2870             # a redundant first output line, when it's a shell function
2871             type "$a" | awk '
2872                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
2873                 { print; fflush() }
2874                 END { if (NR < 2 && skipped) print skipped }
2875             '
2876         else
2877             printf "\e[31m%s not found\e[0m\n" "$a"
2878         fi
2879     done | less -JMKiCRS
2880 }
2881 
2882 # Word-Count TSV, runs the `wc` app using all stats, emitting tab-separated
2883 # lines instead
2884 wctsv() {
2885     printf "file\tbytes\tlines\tcharacters\twords\tlongest\n"
2886     stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \
2887         's-^ *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^\r]*)$-\6\t\4\t\1\t\3\t\2\t\5-' |
2888         awk '
2889             NR > 1 { print prev; fflush() }
2890             { prev = $0 }
2891             END { if (NR == 1 || !/^total\t/) print }
2892         '
2893 }
2894 
2895 # get weather forecasts, almost filling the terminal's current width
2896 weather() {
2897     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
2898     curl --show-error -s telnet://graph.no:79 |
2899     sed -E \
2900         -e 's/ *\r?$//' \
2901         -e '/^\[/d' \
2902         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
2903         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
2904         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
2905         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
2906         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
2907         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
2908         -e 's/\*/○/g' |
2909     awk 1 |
2910     less -JMKiCRS
2911 }
2912 
2913 # Weather Forecast
2914 wf() {
2915     printf "%s\r\n\r\n" "$*" | curl --show-error -s telnet://graph.no:79 |
2916         awk '{ print; fflush() }' | less -JMKiCRS
2917 }
2918 
2919 # recursively find all files with trailing spaces/CRs
2920 wheretrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2921 
2922 # recursively find all files with trailing spaces/CRs
2923 whichtrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2924 
# turn all detected full unix-style paths mounted under /mnt, which start from
# the filesystem root (like /mnt/c/...), into WINdows-style PATHS
2927 winpaths() {
2928     awk '{ print; fflush() }' "$@" |
2929         sed -u -E 's-(/mnt/([A-Za-z])(/))-\u\2:/-g'
2930 }
2931 
2932 # run `xargs`, using whole lines as extra arguments
2933 # x() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
2934 
2935 # run `xargs`, using zero/null bytes as the extra-arguments terminator
2936 x0() { xargs -0 "$@"; }
2937 
2938 # run `xargs`, using whole lines as extra arguments
2939 # xl() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
2940 
2941 # Youtube Audio Player
2942 yap() {
2943     local url
2944     # some youtube URIs end with extra playlist/tracker parameters
2945     url="$(echo "$1" | sed 's-&.*--')"
2946     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
2947 }
2948 
2949 # show a calendar for the current YEAR, or for the year given
2950 year() {
2951     {
2952         # show the current date/time center-aligned
2953         printf "%20s\e[32m%s\e[0m  \e[34m%s\e[0m\n\n" \
2954             "" "$(date +'%a %b %d %Y')" "$(date +%T)"
2955         # debian linux has a different `cal` app which highlights the day
2956         if [ -e "/usr/bin/ncal" ]; then
2957             # fix debian/ncal's weird way to highlight the current day
2958             ncal -C -y "$@" | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
2959         else
2960             cal -y "$@"
2961         fi
2962     } | less -JMKiCRS
2963 }
2964 
2965 # show the current date in the YYYY-MM-DD format
2966 ymd() { date +'%Y-%m-%d'; }
2967 
2968 # YouTube Url
2969 ytu() {
2970     local url
2971     # some youtube URIs end with extra playlist/tracker parameters
2972     url="$(echo "$1" | sed 's-&.*--')"
2973     [ $# -gt 0 ] && shift
2974     yt-dlp "$@" --get-url "${url}"
2975 }
2976 
2977 # . <(
2978 #     find "$(dirname $(which clam))" -type f -print0 |
2979 #         xargs -0 -n 1 basename |
2980 #         awk '{ print "unset " $0 }'
2981 # )