File: clam.sh
   1 #!/bin/sh
   2 
   3 # The MIT License (MIT)
   4 #
   5 # Copyright © 2020-2025 pacman64
   6 #
   7 # Permission is hereby granted, free of charge, to any person obtaining a copy
   8 # of this software and associated documentation files (the “Software”), to deal
   9 # in the Software without restriction, including without limitation the rights
  10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11 # copies of the Software, and to permit persons to whom the Software is
  12 # furnished to do so, subject to the following conditions:
  13 #
  14 # The above copyright notice and this permission notice shall be included in
  15 # all copies or substantial portions of the Software.
  16 #
  17 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23 # SOFTWARE.
  24 
  25 
  26 # clam
  27 #
  28 # Command-Line Augmentation Module (clam): get the best out of your shell
  29 #
  30 #
  31 # This is a collection of arguably useful shell functions and shortcuts:
  32 # some of these extra commands can be real time/effort savers, ideally
  33 # letting you concentrate on getting things done.
  34 #
  35 # Some of these commands depend on my other scripts from the `pac-tools`;
  36 # others either rely on widely-preinstalled command-line apps, or on ones
  37 # available from most of the major command-line `package` managers.
  38 #
  39 # Among these commands, you'll notice a preference for lines whose items
  40 # are tab-separated instead of space-separated, and for unix-style lines,
  41 # which always end with a line-feed instead of a CRLF byte-pair. This
  42 # convention makes plain-text data-streams less ambiguous and generally
  43 # easier to work with, especially when passing them along pipes.
  44 #
  45 # To use this script, you're supposed to `source` it, so its definitions
  46 # stay for your whole shell session: for that, you can run `source clam` or
  47 # `. clam` (no quotes either way), either directly or at shell startup.
  48 #
  49 # This script is compatible with `bash`, `zsh`, and even `dash`, which is
  50 # debian linux's default non-interactive shell. Some of its commands even
  51 # seem to work on busybox's shell.
  52 
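# A minimal usage sketch (illustrative, not part of the original commands): to
# get these definitions in every session, source this file from your shell's
# startup script, assuming it's saved as `clam` somewhere on your PATH
#
#   # in ~/.bashrc, ~/.zshrc, or similar
#   . clam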
  53 
  54 case "$1" in
  55     -h|--h|-help|--help)
  56         # show help message, using the info-comment from this very script
  57         awk '
  58             /^case / { exit }
  59             /^# +clam$/, /^$/ { gsub(/^# ?/, ""); print }
  60         ' "$0"
  61         exit 0
  62     ;;
  63 esac
  64 
  65 
  66 # dash doesn't support regex-matching syntax, forcing the use of case statements
  67 case "$0" in
  68     -bash|-dash|-sh|bash|dash|sh)
  69         # script is being sourced with bash or dash, which is good
  70         :
  71     ;;
  72     *)
  73         case "$ZSH_EVAL_CONTEXT" in
  74             *:file)
  75                 # script is being sourced with zsh, which is good
  76                 :
  77             ;;
  78             *)
  79                 # script is being run normally, which is a waste of time
  80 printf "\e[48;2;255;255;135m\e[30mDon't run this script, source it instead: to do that,\e[0m\n"
  81 printf "\e[48;2;255;255;135m\e[30mrun 'source clam' or '. clam' (no quotes either way).\e[0m\n"
  82                 # failing during shell-startup may deny shell access, so exit
  83                 # with a 0 error-code to declare success
  84                 exit 0
  85             ;;
  86         esac
  87     ;;
  88 esac
  89 
  90 
  91 # n-column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
  92 alias 1='bsbs 1'
  93 alias 2='bsbs 2'
  94 alias 3='bsbs 3'
  95 alias 4='bsbs 4'
  96 alias 5='bsbs 5'
  97 alias 6='bsbs 6'
  98 alias 7='bsbs 7'
  99 alias 8='bsbs 8'
 100 alias 9='bsbs 9'
 101 alias 0='bsbs 10'
 102 
 103 # alias a=avoid
 104 # alias c=cat
 105 # alias e=echo
 106 # alias f=fetch
 107 # alias g=get
 108 # alias h=naman
 109 # alias m=match
 110 # alias p=plain
 111 # alias q=quiet
 112 # alias r=reset
 113 # alias t=time
 114 # alias y=year
 115 
 116 # find name from the local `apt` database of installable packages
 117 # aptfind() {
 118 #     # despite warnings, the `apt search` command has been around for years
 119 #     # apt search "$1" 2>/dev/null | rg -A 1 "^$1" | sed -u 's/^--$//'
 120 #     apt search "$1" 2>/dev/null | rg -A 1 "^[a-z0-9-]*$1" |
 121 #         sed -u 's/^--$//' | less -JMKiCRS
 122 # }
 123 
 124 # emit each argument given as its own line of output
 125 args() { awk 'BEGIN { for (i = 1; i < ARGC; i++) print ARGV[i]; exit }' "$@"; }
 126 
 127 # turn UTF-8 into visible pseudo-ASCII, where variants of latin letters become
 128 # their basic ASCII counterparts, and where non-ASCII symbols become question
 129 # marks, one question mark for each code-point byte
 130 asciify() { iconv -f utf-8 -t ascii//translit "$@"; }
 131 
 132 # avoid/ignore lines which match any of the regexes given
 133 avoid() {
 134     awk '
 135         BEGIN {
 136             for (i = 1; i < ARGC; i++) {
 137                 e[i] = ARGV[i]
 138                 delete ARGV[i]
 139             }
 140         }
 141 
 142         {
 143             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
 144             print; fflush()
 145             got++
 146         }
 147 
 148         END { exit(got == 0) }
 149     ' "${@:-^\r?$}"
 150 }
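
# illustrative example for `avoid`, assuming a (hypothetical) file `app.log`:
# keep only the lines which match none of the regexes given
#
#   avoid '^DEBUG' '^TRACE' < app.log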
 151 
 152 # AWK Begin
 153 # awkb() { awk "BEGIN { $1; exit }"; }
 154 
 155 # AWK Begin
 156 awkb() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 157 
 158 # emit a line with a repeating ball-like symbol in it
 159 balls() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -●-g'; }
 160 
 161 # show an ansi-styled BANNER-like line
 162 # banner() { printf "\e[7m%s\e[0m\n" "$*"; }
 163 
 164 # show an ansi-styled BANNER-like line
 165 banner() { printf "\e[7m%-$(tput cols)s\e[0m\n" "$*"; }
 166 
 167 # emit a colored bar which can help visually separate different outputs
 168 bar() {
 169     [ "${1:-80}" -gt 0 ] &&
 170         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" ""
 171 }
 172 
 173 # process Blocks/paragraphs of non-empty lines with AWK
 174 # bawk() { awk -F='' -v RS='' "$@"; }
 175 
 176 # process Blocks/paragraphs of non-empty lines with AWK
 177 bawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 178 
 179 # play a repeating and annoying high-pitched beep sound a few times a second,
 180 # lasting the number of seconds given, or for 1 second by default; uses my
 181 # script `waveout`
 182 beeps() {
 183     local f='sin(2_000 * tau * t) * (t % 0.5 < 0.0625)'
 184     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 185 }
 186 
 187 # start by joining all arguments given as a tab-separated-items line of output,
 188 # followed by all lines from stdin verbatim
 189 begintsv() {
 190     awk '
 191         BEGIN {
 192             for (i = 1; i < ARGC; i++) {
 193                 if (i > 1) printf "\t"
 194                 printf "%s", ARGV[i]
 195                 delete ARGV[i]
 196             }
 197             if (ARGC > 1) printf "\n"
 198             fflush()
 199         }
 200         { print; fflush() }
 201     ' "$@"
 202 }
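
# illustrative example for `begintsv`: emit a tab-separated header line with
# the (hypothetical) column names `name` and `size`, then pass data through
#
#   printf 'alpha\t1\nbeta\t2\n' | begintsv name size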
 203 
 204 # play a repeating synthetic-bell-like sound lasting the number of seconds
 205 # given, or for 1 second by default; uses my script `waveout`
 206 bell() {
 207     local f='sin(880*tau*u) * exp(-10*u)'
 208     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 209 }
 210 
 211 # play a repeating sound with synthetic-bells, lasting the number of seconds
 212 # given, or for 1 second by default; uses my script `waveout`
 213 bells() {
 214     local f="sum(sin(880*tau*v)*exp(-10*v) for v in (u, (u-0.25)%1)) / 2"
 215     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 216 }
 217 
 218 # Breathe Header: add an empty line after the first one (the header), then
 219 # separate groups of 5 lines (by default) with empty lines between them
 220 bh() {
 221     local n="${1:-5}"
 222     [ $# -gt 0 ] && shift
 223     awk -v n="$n" '
 224         BEGIN { if (n == 0) n = -1 }
 225         (NR - 1) % n == 1 && NR > 1 { print "" }
 226         { print; fflush() }
 227     ' "$@"
 228 }
 229 
 230 # recursively find all files with at least the number of bytes given; when
 231 # not given a minimum byte-count, the default is 100 binary megabytes
 232 bigfiles() {
 233     local n
 234     n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 235     [ $# -gt 0 ] && shift
 236 
 237     local arg
 238     for arg in "${@:-.}"; do
 239         if [ ! -d "${arg}" ]; then
 240             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 241             return 1
 242         fi
 243         stdbuf -oL find "${arg}" -type f \( -size "$n"c -o -size +"$n"c \)
 244     done
 245 }
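
# illustrative example for `bigfiles`: find files of at least 50 binary
# megabytes under the current folder; underscores in the number are allowed
#
#   bigfiles 52_428_800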
 246 
 247 # Breathe Lines: separate groups of 5 lines (by default) with empty lines
 248 bl() {
 249     local n="${1:-5}"
 250     [ $# -gt 0 ] && shift
 251     awk -v n="$n" '
 252         BEGIN { if (n == 0) n = -1 }
 253         NR % n == 1 && NR != 1 { print "" }
 254         { print; fflush() }
 255     ' "$@"
 256 }
 257 
 258 # process BLocks/paragraphs of non-empty lines with AWK
 259 # blawk() { awk -F='' -v RS='' "$@"; }
 260 
 261 # process BLocks/paragraphs of non-empty lines with AWK
 262 blawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 263 
 264 # emit a line with a repeating block-like symbol in it
 265 blocks() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -█-g'; }
 266 
 267 # Book-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 268 # my script `bsbs`
 269 bman() {
 270     local w
 271     w="$(tput cols)"
 272     if [ "$w" -gt 100 ]; then
 273         w="$((w / 2 - 1))"
 274     fi
 275     MANWIDTH="$w" man "$@" | bsbs 2
 276 }
 277 
 278 # Begin-Only Awk
 279 # boa() { awk "BEGIN { $1; exit }"; }
 280 
 281 # Begin-Only Awk
 282 boa() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 283 
 284 # Begin-Only AWK
 285 # boawk() { awk "BEGIN { $1; exit }"; }
 286 
 287 # Begin-Only AWK
 288 boawk() { stdbuf -oL awk "BEGIN { $1; exit }"; }
 289 
 290 # BOOK-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 291 # my script `bsbs`
 292 bookman() {
 293     local w
 294     w="$(tput cols)"
 295     if [ "$w" -gt 100 ]; then
 296         w="$((w / 2 - 1))"
 297     fi
 298     MANWIDTH="$w" man "$@" | bsbs 2
 299 }
 300 
 301 # split lines using the regex given, turning them into single-item lines
 302 breakdown() {
 303     local sep="${1:- }"
 304     [ $# -gt 0 ] && shift
 305     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 306 }
 307 
 308 # separate groups of 5 lines (by default) with empty lines
 309 breathe() {
 310     local n="${1:-5}"
 311     [ $# -gt 0 ] && shift
 312     awk -v n="$n" '
 313         BEGIN { if (n == 0) n = -1 }
 314         NR % n == 1 && NR != 1 { print "" }
 315         { print; fflush() }
 316     ' "$@"
 317 }
 318 
 319 # Browse Text
 320 bt() { less -JMKNiCRS "$@"; }
 321 
 322 # show a reverse-sorted tally of all lines read, where ties are sorted
 323 # alphabetically, and where trailing bullets are added to quickly make
 324 # the tally counts comparable at a glance
 325 bully() {
 326     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
 327         # reassure users by instantly showing the header
 328         BEGIN { print "value\ttally\tbullets"; fflush() }
 329 
 330         { gsub(/\r$/, ""); tally[$0]++ }
 331 
 332         END {
 333             # find the max tally, which is needed to build the bullets-string
 334             max = 0
 335             for (k in tally) {
 336                 if (max < tally[k]) max = tally[k]
 337             }
 338 
 339             # make enough bullets for all tallies: this loop makes growing the
 340             # string a task with complexity O(n * log n), instead of a naive
 341 #             O(n**2), which can slow things down when tallies are high enough
 342             bullets = "•"
 343             for (n = max; n > 1; n /= 2) {
 344                 bullets = bullets bullets
 345             }
 346 
 347             # emit unsorted output lines to the sort cmd, which will emit the
 348             # final reverse-sorted tally lines
 349             for (k in tally) {
 350                 s = substr(bullets, 1, tally[k])
 351                 printf("%s\t%d\t%s\n", k, tally[k], s) | sort
 352             }
 353         }
 354     ' "$@"
 355 }
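
# illustrative example for `bully`: tally file extensions under the current
# folder, using other commands defined in this script
#
#   files | justext | bully | less -JMKiCRS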
 356 
 357 # play a busy-phone-line sound lasting the number of seconds given, or for 1
 358 # second by default; uses my script `waveout`
 359 busy() {
 360     # local f='(u < 0.5) * (sin(480*tau * t) + sin(620*tau * t)) / 2'
 361     local f='min(1, exp(-90*(u-0.5))) * (sin(480*tau*t) + sin(620*tau*t)) / 2'
 362     # local f='(sin(350*tau*t) + sin(450*tau*t)) / 2 * min(1, exp(-90*(u-0.5)))'
 363     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 364 }
 365 
 366 # keep all BUT the FIRST (skip) n lines, or skip just the 1st line by default
 367 butfirst() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
 368 
 369 # keep all BUT the LAST n lines, or skip just the last line by default
 370 butlast() { head -n -"${1:-1}" "${2:--}"; }
 371 
 372 # load bytes from the filenames given
 373 bytes() { cat "$@"; }
 374 
 375 # quick alias for `cat`
 376 c() { cat "$@"; }
 377 
 378 # CAlculator with Nice numbers runs my script `ca` and colors results with
 379 # my script `nn`, alternating styles to make long numbers easier to read
 380 can() { ca "$@" | nn --gray; }
 381 
 382 # uppercase the first letter on each line, and lowercase all later letters
 383 capitalize() {
 384     awk '{ print; fflush() }' "$@" | sed -E 's-^(.*)-\L\1-; s-^(.)-\u\1-'
 385 }
 386 
 387 # conCATenate Lines guarantees no lines are ever accidentally joined
 388 # across inputs, always emitting a line-feed at the end of every line
 389 # catl() { awk '{ print; fflush() }' "$@"; }
 390 
 391 # conCATenate Lines ignores leading byte-order marks on first lines, trailing
 392 # carriage-returns, and guarantees no lines are ever accidentally joined
 393 # across inputs, always emitting a line-feed at the end of every line
 394 catl() {
 395     awk '
 396         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 397         { gsub(/\r$/, ""); print; fflush() }
 398     ' "$@"
 399 }
 400 
 401 # Csv AWK: CSV-specific input settings for `awk`
 402 # cawk() { awk --csv "$@"; }
 403 
 404 # Csv AWK: CSV-specific input settings for `awk`
 405 cawk() { stdbuf -oL awk --csv "$@"; }
 406 
 407 # Compile C Stripped
 408 ccs() { cc -Wall -O2 -s -fanalyzer "$@"; }
 409 
 410 # center-align lines of text, using the current screen width
 411 center() {
 412     awk -v width="$(tput cols)" '
 413         {
 414             gsub(/\r$/, "")
 415             lines[NR] = $0
 416             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
 417             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
 418             l = length
 419             if (maxlen < l) maxlen = l
 420         }
 421 
 422         END {
 423             n = (width - maxlen) / 2
 424             if (n % 1) n = n - (n % 1)
 425             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
 426             for (i = 1; i <= NR; i++) printf fmt, "", lines[i]
 427         }
 428     ' "$@"
 429 }
 430 
 431 # Colored Go Test on the folder given; uses my command `gbmawk`
 432 cgt() { go test "${1:-.}" 2>&1 | gbmawk '/^ok/' '/^[-]* ?FAIL/' '/^\?/'; }
 433 
 434 # ignore final life-feed from text, if it's the very last byte; also ignore
 435 # all trailing carriage-returns
 436 choplf() {
 437     awk '
 438         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 439         NR > 1 { print ""; fflush() }
 440         { gsub(/\r$/, ""); printf "%s", $0; fflush() }
 441     ' "$@"
 442 }
 443 
 444 # Color Json using the `jq` app, allowing an optional filepath as the data
 445 # source, and even an optional transformation formula
 446 cj() { jq -C "${2:-.}" "${1:--}"; }
 447 
 448 # clean the screen, after running the command given
 449 clean() {
 450     local res
 451     tput smcup
 452     "$@"
 453     res=$?
 454     tput rmcup
 455     return "${res}"
 456 }
 457 
 458 # show a live digital clock
 459 clock() { watch -n 1 echo 'Press Ctrl + C to quit this clock'; }
 460 
 461 # Colored Live/Line-buffered RipGrep ensures results show up immediately,
 462 # also emitting colors when piped
 463 clrg() { rg --color=always --line-buffered "$@"; }
 464 
 465 # CLear Screen, like the old dos command of the same name
 466 cls() { clear; }
 467 
 468 # COunt COndition: count how many times the AWK expression given is true
 469 coco() {
 470     local cond="${1:-1}"
 471     [ $# -gt 0 ] && shift
 472     awk "
 473         { low = lower = tolower(\$0) }
 474         ${cond} { count++ }
 475         END { print count + 0 }
 476     " "$@"
 477 }
 478 
 479 # Colored RipGrep ensures app `rg` emits colors when piped
 480 crg() { rg --color=always --line-buffered "$@"; }
 481 
 482 # emit a line with a repeating cross-like symbol in it
 483 crosses() {
 484     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -×-g'
 485 }
 486 
 487 # split lines using the string given, turning them into single-item lines
 488 crumble() {
 489     local sep="${1:- }"
 490     [ $# -gt 0 ] && shift
 491     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 492 }
 493 
 494 # turn Comma-Separated-Values tables into Tab-Separated-Values tables
 495 csv2tsv() { xsv fmt -t '\t' "$@"; }
 496 
 497 # Change Units turns common US units into international ones; uses my
 498 # scripts `bu` (Better Units) and `nn` (Nice Numbers)
 499 cu() {
 500     bu "$@" | awk '
 501         NF == 5 || (NF == 4 && $NF == "s") { print $(NF-1), $NF }
 502         NF == 4 && $NF != "s" { print $NF }
 503     ' | nn --gray
 504 }
 505 
 506 # CURL Silent spares you the progress bar, but still tells you about errors
 507 curls() { curl --show-error -s "$@"; }
 508 
 509 # Count With AWK: count the times the AWK expression/condition given is true
 510 cwawk() {
 511     local cond="${1:-1}"
 512     [ $# -gt 0 ] && shift
 513     awk "
 514         { low = lower = tolower(\$0) }
 515         ${cond} { count++ }
 516         END { print count + 0 }
 517     " "$@"
 518 }
 519 
 520 # listen to streaming DANCE music
 521 dance() {
 522     printf "streaming \e[7mDance Wave Retro\e[0m\n"
 523     # mpv --quiet https://retro.dancewave.online/retrodance.mp3
 524     mpv --really-quiet https://retro.dancewave.online/retrodance.mp3
 525 }
 526 
 527 # emit a line with a repeating dash-like symbol in it
 528 dashes() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -—-g'; }
 529 
 530 # DEcode BASE64-encoded data, or even base64-encoded data-URIs, by ignoring
 531 # the leading data-URI declaration, if present
 532 debase64() { sed -E 's-^data:.{0,50};base64,--' "${1:--}" | base64 -d; }
 533 
 534 # DECAPitate (lines) emits the first line as is, piping all lines after that
 535 # to the command given, passing all/any arguments/options to it
 536 # decap() {
 537 #     awk -v cmd="$*" 'NR == 1 { print; fflush() } NR > 1 { print | cmd }'
 538 # }
 539 
 540 # ignore whole-comment lines, or just trailing unix-style comments in them
 541 decomment() {
 542     awk '/^ *#/ { next } { gsub(/ *#.*$/, ""); print; fflush(); }' "$@"
 543 }
 544 
 545 # turn Comma-Separated-Values tables into tab-separated-values tables
 546 # decsv() { xsv fmt -t '\t' "$@"; }
 547 
 548 # DEDUPlicate prevents lines from appearing more than once
 549 dedup() { awk '!c[$0]++ { print; fflush() }' "$@"; }
 550 
 551 # dictionary-DEFine the word given, using an online service
 552 def() {
 553     local arg
 554     local gap=0
 555     for arg in "$@"; do
 556         [ "${gap}" -gt 0 ] && printf "\n"
 557         gap=1
 558         printf "\e[7m%-80s\x1b[0m\n" "${arg}"
 559         curl -s "dict://dict.org/d:${arg}" | awk '
 560             { gsub(/\r$/, "") }
 561             /^151 / {
 562                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 563                 next
 564             }
 565             /^[1-9][0-9]{2} / {
 566                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 567                 next
 568             }
 569             { print; fflush() }
 570         '
 571     done | less -JMKiCRS
 572 }
 573 
 574 # dictionary-define the word given, using an online service
 575 define() {
 576     local arg
 577     local gap=0
 578     for arg in "$@"; do
 579         [ "${gap}" -gt 0 ] && printf "\n"
 580         gap=1
 581         printf "\e[7m%-80s\x1b[0m\n" "${arg}"
 582         curl -s "dict://dict.org/d:${arg}" | awk '
 583             { gsub(/\r$/, "") }
 584             /^151 / {
 585                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 586                 next
 587             }
 588             /^[1-9][0-9]{2} / {
 589                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 590                 next
 591             }
 592             { print; fflush() }
 593         '
 594     done | less -JMKiCRS
 595 }
 596 
 597 # DEcompress GZip-encoded data
 598 # degz() { zcat "$@"; }
 599 
 600 # turn JSON Lines into a proper json array
 601 dejsonl() { jq -s -M "${@:-.}"; }
 602 
 603 # delay lines from the standard-input, waiting the number of seconds given
 604 # for each line, or waiting 1 second by default
 605 # delay() {
 606 #     local seconds="${1:-1}"
 607 #     (
 608 #         IFS="$(printf "\n")"
 609 #         while read -r line; do
 610 #             sleep "${seconds}"
 611 #             printf "%s\n" "${line}"
 612 #         done
 613 #     )
 614 # }
 615 
 616 # convert lines of Space(s)-Separated Values into lines of tab-separated values
 617 dessv() {
 618     awk '
 619         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 620 
 621         {
 622             gsub(/\r$/, "")
 623             for (i = 1; i <= NF; i++) {
 624                 if (i > 1) printf "\t"
 625                 printf "%s", $i
 626             }
 627             printf "\n"; fflush()
 628         }
 629     ' "$@"
 630 }
 631 
 632 # expand tabs each into up to the number of spaces given, or 4 by default
 633 detab() { expand -t "${1:-4}"; }
 634 
 635 # ignore trailing spaces, as well as trailing carriage returns
 636 detrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
 637 
 638 # turn UTF-16 data into UTF-8
 639 deutf16() { iconv -f utf16 -t utf8 "$@"; }
 640 
 641 # DIVide 2 numbers 3 ways, including the complement
 642 div() {
 643     awk -v a="${1:-1}" -v b="${2:-1}" '
 644         BEGIN {
 645             gsub(/_/, "", a)
 646             gsub(/_/, "", b)
 647             if (a > b) { c = a; a = b; b = c }
 648             c = 1 - a / b
 649             if (0 <= c && c <= 1) printf "%f\n%f\n%f\n", a / b, b / a, c
 650             else printf "%f\n%f\n", a / b, b / a
 651             exit
 652         }'
 653 }
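
# illustrative example for `div`: `div 3 4` emits 0.750000 (3/4), 1.333333
# (4/3), and 0.250000 (the complement of 3/4), each on its own line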
 654 
 655 # get/fetch data from the filename or URI given; named `dog` because dogs can
 656 # `fetch` things for you
 657 # dog() {
 658 #     if [ $# -gt 1 ]; then
 659 #         printf "\e[31mdogs only have 1 mouth to fetch with\e[0m\n" >&2
 660 #         return 1
 661 #     fi
 662 #
 663 #     if [ -e "$1" ]; then
 664 #         cat "$1"
 665 #         return $?
 666 #     fi
 667 #
 668 #     case "${1:--}" in
 669 #         -) cat -;;
 670 #         file://*|https://*|http://*) curl --show-error -s "$1";;
 671 #         ftp://*|ftps://*|sftp://*) curl --show-error -s "$1";;
 672 #         dict://*|telnet://*) curl --show-error -s "$1";;
 673 #         data:*) echo "$1" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 674 #         *) curl --show-error -s "https://$1";;
 675 #     esac 2> /dev/null || {
 676 #         printf "\e[31mcan't fetch %s\e[0m\n" "${1:--}" >&2
 677 #         return 1
 678 #     }
 679 # }
 680 
 681 # emit a line with a repeating dot-like symbol in it
 682 dots() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -·-g'; }
 683 
 684 # ignore/remove all matched regexes given on all stdin lines
 685 drop() {
 686     awk '
 687         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 688         {
 689             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 690             print; fflush()
 691         }
 692     ' "${@:-\r$}"
 693 }
 694 
 695 # show the current Date and Time
 696 dt() {
 697     printf "\e[38;2;78;154;6m%s\e[0m  \e[38;2;52;101;164m%s\e[0m\n" \
 698         "$(date +'%a %b %d')" "$(date +%T)"
 699 }
 700 
 701 # show the current Date, Time, and a Calendar with the 3 `current` months
 702 dtc() {
 703     {
 704         # show the current date/time center-aligned
 705         printf "%20s\e[38;2;78;154;6m%s\e[0m  \e[38;2;52;101;164m%s\e[0m\n\n" \
 706             "" "$(date +'%a %b %d')" "$(date +%T)"
 707         # debian linux has a different `cal` app which highlights the day
 708         if [ -e "/usr/bin/ncal" ]; then
 709             # fix debian/ncal's weird way to highlight the current day
 710             ncal -C -3 | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
 711         else
 712             cal -3
 713         fi
 714     } | less -JMKiCRS
 715 }
 716 
 717 # quick alias for `echo`
 718 e() { echo "$@"; }
 719 
 720 e4() { expand -t 4 "$@"; } # Expand tabs into up to 4 spaces each
 721 
 722 e8() { expand -t 8 "$@"; } # Expand tabs into up to 8 spaces each
 723 
 724 # Evaluate Awk expression
 725 ea() {
 726     local expr="${1:-0}"
 727     [ $# -gt 0 ] && shift
 728     awk "BEGIN { print ${expr}; exit }" "$@"
 729 }
 730 
 731 # EDit RUN shell commands, using an interactive editor
 732 edrun() { . <( micro -readonly true -filetype shell | leak --inv ); }
 733 
 734 # Extended-mode Grep, enabling its full regex syntax
 735 eg() { grep -E --line-buffered "$@"; }
 736 
 737 # Extended Grep, Recursive Interactive and Plain
 738 # egrip() { ugrep -r -Q --color=never -E "$@"; }
 739 
 740 # show all empty files in a folder, digging recursively
 741 emptyfiles() {
 742     local arg
 743     for arg in "${@:-.}"; do
 744         if [ ! -d "${arg}" ]; then
 745             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 746             return 1
 747         fi
 748         stdbuf -oL find "${arg}" -type f -empty
 749     done
 750 }
 751 
 752 # show all empty folders in a folder, digging recursively
 753 emptyfolders() {
 754     local arg
 755     for arg in "${@:-.}"; do
 756         if [ ! -d "${arg}" ]; then
 757             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 758             return 1
 759         fi
 760         stdbuf -oL find "${arg}" -type d -empty
 761     done
 762 }
 763 
 764 # Evaluate Nodejs expression
 765 # en() {
 766 #     local expr="${1:-null}"
 767 #     expr="$(echo "${expr}" | sed 's-\\-\\\\-g; s-`-\`-g')"
 768 #     node -e "console.log(${expr})" | sed 's-\x1b\[[^A-Za-z]+[A-Za-z]--g'
 769 # }
 770 
 771 # Evaluate Python expression
 772 ep() { python -c "print(${1:-None})"; }
 773 
 774 # Extended Plain Interactive Grep
 775 epig() { ugrep --color=never -Q -E "$@"; }
 776 
 777 # Extended Plain Recursive Interactive Grep
 778 eprig() { ugrep -r --color=never -Q -E "$@"; }
 779 
 780 # Evaluate Ruby expression
 781 # er() { ruby -e "puts ${1:-nil}"; }
 782 
 783 # Edit Run shell commands, using an interactive editor
 784 er() { . <( micro -readonly true -filetype shell | leak --inv ); }
 785 
 786 # ignore/remove all matched regexes given on all stdin lines
 787 erase() {
 788     awk '
 789         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 790         {
 791             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 792             print; fflush()
 793         }
 794     ' "${@:-\r$}"
 795 }
 796 
 797 # Editor Read-Only
 798 ero() { micro -readonly true "$@"; }
 799 
 800 # Extended-mode Sed, enabling its full regex syntax
 801 es() { sed -E -u "$@"; }
 802 
 803 # Expand Tabs each into up to the number of spaces given, or 4 by default
 804 et() { expand -t "${1:-4}"; }
 805 
 806 # convert EURos into CAnadian Dollars, using the latest official exchange
 807 # rates from the bank of canada; during weekends, the latest rate may be
 808 # from a few days ago; the default amount of euros to convert is 1, when
 809 # not given
 810 eur2cad() {
 811     local site='https://www.bankofcanada.ca/valet/observations/group'
 812     local csv_rates="${site}/FX_RATES_DAILY/csv"
 813     local url
 814     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
 815     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
 816         /EUR/ { for (i = 1; i <= NF; i++) if($i ~ /EUR/) j = i }
 817         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
 818 }
 819 
 820 # EValuate AWK expression
 821 evawk() {
 822     local expr="${1:-0}"
 823     [ $# -gt 0 ] && shift
 824     awk "BEGIN { print ${expr}; exit }" "$@"
 825 }
 826 
 827 # get various currency EXchange RATES
 828 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/$1"; }
 829 
 830 # get various currency EXchange RATES
 831 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-USD}"; }
 832 
 833 # get various currency EXchange RATES
 834 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-EUR}"; }
 835 
 836 # get various currency EXchange RATES
 837 exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-CAD}"; }
 838 
 839 # convert fahrenheit into celsius
 840 fahrenheit() {
 841     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' |
 842         awk '/./ { printf "%.2f\n", ($0 - 32) * 5.0/9.0 }'
 843 }
 844 
 845 # Flushed AWK
 846 fawk() { stdbuf -oL awk "$@"; }
 847 
 848 # fetch/web-request all URIs given, using protocol HTTPS when none is given
 849 fetch() {
 850     local a
 851     for a in "$@"; do
 852         case "$a" in
 853             file://*|https://*|http://*) curl --show-error -s "$a";;
 854             ftp://*|ftps://*|sftp://*) curl --show-error -s "$a";;
 855             dict://*|telnet://*) curl --show-error -s "$a";;
 856             data:*) echo "$a" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 857             *) curl --show-error -s "https://$a";;
 858         esac
 859     done
 860 }
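
# illustrative example for `fetch`: when no protocol is given, HTTPS is
# assumed, so both lines below request the same resource
#
#   fetch https://example.org
#   fetch example.org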
 861 
 862 # run the Fuzzy Finder (fzf) in multi-choice mode, with custom keybindings
 863 ff() { fzf -m --bind ctrl-a:select-all,ctrl-space:toggle "$@"; }
 864 
 865 # show all files in a folder, digging recursively
 866 files() {
 867     local arg
 868     for arg in "${@:-.}"; do
 869         if [ ! -d "${arg}" ]; then
 870             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 871             return 1
 872         fi
 873         stdbuf -oL find "${arg}" -type f
 874     done
 875 }
 876 
 877 # recursively find all files with fewer bytes than the number given
 878 filesunder() {
 879     local n
 880     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 881     [ $# -gt 0 ] && shift
 882 
 883     local arg
 884     for arg in "${@:-.}"; do
 885         if [ ! -d "${arg}" ]; then
 886             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 887             return 1
 888         fi
 889         stdbuf -oL find "${arg}" -type f -size -"$n"c
 890     done
 891 }
 892 
 893 # get the first n lines, or 1 by default
 894 first() { head -n "${1:-1}" "${2:--}"; }
 895 
 896 # limit data up to the first n bytes
 897 firstbytes() { head -c "$1" "${2:--}"; }
 898 
 899 # get the first n lines, or 1 by default
 900 firstlines() { head -n "${1:-1}" "${2:--}"; }
 901 
 902 # fix lines, ignoring leading UTF-8 BOMs (byte-order marks) on each input's
 903 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
 904 # and ensuring each input's last line ends with a line-feed
 905 fixlines() {
 906     awk '
 907         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 908         { gsub(/\r$/, ""); print; fflush() }
 909     ' "$@"
 910 }
 911 
 912 # FLushed AWK
 913 # flawk() { stdbuf -oL awk "$@"; }
 914 
 915 # First Line AWK emits the first line as is, then processes all later lines
 916 # with the AWK code given as the first argument; any remaining arguments are
 917 # passed to `awk` as given
 918 flawk() {
 919     local code="${1:-1}"
 920     [ $# -gt 0 ] && shift
 921     stdbuf -oL awk "NR == 1 { print; fflush(); next } ${code}" "$@"
 922 }
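
# illustrative example for `flawk`: keep the header line from `ps`, and only
# show the later lines which match a pattern
#
#   ps aux | flawk '/ssh/'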
 923 
 924 # Faint LEAK emits/tees input both to stdout and stderr, coloring gray what
 925 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
 926 # involving several steps
 927 fleak() {
 928     awk '
 929         {
 930             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
 931             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0 > "/dev/stderr"
 932             print; fflush()
 933         }
 934     ' "$@"
 935 }
 936 
 937 # try to run the command given using line-buffering for its (standard) output
 938 flushlines() { stdbuf -oL "$@"; }
 939 
 940 # show all folders in a folder, digging recursively
 941 folders() {
 942     local arg
 943     for arg in "${@:-.}"; do
 944         if [ ! -d "${arg}" ]; then
 945             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 946             return 1
 947         fi
 948         stdbuf -oL find "${arg}" -type d | awk '!/^\.$/ { print; fflush() }'
 949     done
 950 }
 951 
 952 # start from the line number given, skipping all previous ones
 953 fromline() { tail -n +"${1:-1}" "${2:--}"; }
 954 
 955 # convert FeeT into meters
 956 ft() {
 957     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 958         awk '/./ { printf "%.2f\n", 0.3048 * $0; fflush() }'
 959 }
 960 
 961 # convert FeeT² (squared) into meters²
 962 ft2() {
 963     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 964         awk '/./ { printf "%.2f\n", 0.09290304 * $0 }'
 965 }
 966 
 967 # Get/fetch data from the filenames/URIs given; uses my script `get`
 968 # g() { get "$@"; }
 969 
 970 # run `grep` in extended-regex mode, enabling its full regex syntax
 971 # g() { grep -E --line-buffered "$@"; }
 972 
 973 # convert GALlons into liters
 974 gal() {
 975     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 976         awk '/./ { printf "%.2f\n", 3.785411784 * $0; fflush() }'
 977 }
 978 
 979 # convert binary GigaBytes into bytes
 980 gb() {
 981     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 982         awk '/./ { printf "%.4f\n", 1073741824 * $0; fflush() }' |
 983         sed 's-\.00*$--'
 984 }
 985 
 986 # glue/stick together various lines, only emitting a line-feed at the end; an
 987 # optional argument is the output-item-separator, which is empty by default
 988 glue() {
 989     local sep="${1:-}"
 990     [ $# -gt 0 ] && shift
 991     awk -v sep="${sep}" '
 992         NR > 1 { printf "%s", sep }
 993         { gsub(/\r/, ""); printf "%s", $0; fflush() }
 994         END { if (NR > 0) print ""; fflush() }
 995     ' "$@"
 996 }
 997 
 998 # GO Build Stripped: a common use-case for the go compiler
 999 gobs() { go build -ldflags "-s -w" -trimpath "$@"; }
1000 
1001 # GO DEPendencieS: show all dependencies in a go project
1002 godeps() { go list -f '{{ join .Deps "\n" }}' "$@"; }
1003 
1004 # GO IMPortS: show all imports in a go project
1005 goimps() { go list -f '{{ join .Imports "\n" }}' "$@"; }
1006 
1007 # go to the folder picked using an interactive TUI; uses my script `bf`
1008 goto() {
1009     local where
1010     where="$(bf "${1:-.}")"
1011     if [ $? -ne 0 ]; then
1012         return 0
1013     fi
1014 
1015     where="$(realpath "${where}")"
1016     if [ ! -d "${where}" ]; then
1017         where="$(dirname "${where}")"
1018     fi
1019     cd "${where}" || return
1020 }
1021 
1022 # GRayed-out lines with AWK
1023 grawk() {
1024     local cond="${1:-1}"
1025     [ $# -gt 0 ] && shift
1026     awk "${cond}"' {
1027             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m")
1028             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush()
1029             next
1030         }
1031         { print; fflush() }
1032     ' "$@"
1033 }
1034 
1035 # Style lines using a GRAY-colored BACKground
1036 grayback() {
1037     awk '
1038         {
1039             gsub(/\x1b\[0m/, "\x1b[0m\x1b[48;2;218;218;218m")
1040             printf "\x1b[48;2;218;218;218m%s\x1b[0m\n", $0; fflush()
1041         }
1042     ' "$@"
1043 }
1044 
1045 # Grep, Recursive Interactive and Plain
1046 # grip() { ugrep -r -Q --color=never -E "$@"; }
1047 
1048 # Global extended regex SUBstitute, using the AWK function of the same name:
1049 # arguments are used as regex/replacement pairs, in that order
1050 gsub() {
1051     awk '
1052         BEGIN {
1053             for (i = 1; i < ARGC; i++) {
1054                 args[++n] = ARGV[i]
1055                 delete ARGV[i]
1056             }
1057         }
1058         {
1059             for (i = 1; i <= n; i += 2) gsub(args[i], args[i + 1])
1060             print; fflush()
1061         }
1062     ' "$@"
1063 }
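
# illustrative example for `gsub`: arguments are regex/replacement pairs, so
# the line below turns dashes into underscores and spaces into dots,
# emitting `a_b.c`
#
#   echo 'a-b c' | gsub '-' '_' ' ' '.'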
1064 
1065 # show Help laid out on 2 side-by-side columns; uses my script `bsbs`
1066 h2() { naman "$@" | bsbs 2; }
1067 
1068 # Highlight (lines) with AWK
1069 hawk() {
1070     local cond="${1:-1}"
1071     [ $# -gt 0 ] && shift
1072     awk '
1073         { low = lower = tolower($0) }
1074         '"${cond}"' {
1075             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1076             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1077             next
1078         }
1079         { print; fflush() }
1080     ' "$@"
1081 }
1082 
1083 # play a heartbeat-like sound lasting the number of seconds given, or for 1
1084 # second by default; uses my script `waveout`
1085 heartbeat() {
1086     local a='sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1])'
1087     local b='((12, u), (8, (u-0.25)%1))'
1088     local f="sum($a for v in $b) / 2"
1089     # local f='sum(sin(10*tau*exp(-20*v))*exp(-2*v) for v in (u, (u-0.25)%1))/2'
1090     # local f='sum(sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1]) for v in ((12, u), (8, (u-0.25)%1)))/2'
1091     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1092 }
1093 
1094 # Highlighted-style ECHO
1095 hecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1096 
1097 # show each byte as a pair of HEXadecimal (base-16) symbols
1098 hexify() {
 1099     cat "$@" | od -A n -t x1 |
1100         awk '{ gsub(/ +/, ""); printf "%s", $0; fflush() } END { printf "\n" }'
1101 }
1102 
1103 # HIghlighted-style ECHO
1104 hiecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1105 
1106 # highlight lines
1107 highlight() {
1108     awk '
1109         {
1110             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1111             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1112         }
1113     ' "$@"
1114 }
1115 
1116 # HIghlight LEAK emits/tees input both to stdout and stderr, highlighting what
1117 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
1118 # involving several steps
1119 hileak() {
1120     awk '
1121         {
1122             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
1123             printf "\x1b[7m%s\x1b[0m\n", $0 > "/dev/stderr"
1124             print; fflush()
1125         }
1126     ' "$@"
1127 }
1128 
1129 # highlight lines
1130 hilite() {
1131     awk '
1132         {
1133             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1134             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1135         }
1136     ' "$@"
1137 }
1138 
1139 # Help Me Remember my custom shell commands
1140 hmr() {
1141     local cmd="bat"
1142     # debian linux uses a different name for the `bat` app
1143     if [ -e "/usr/bin/batcat" ]; then
1144         cmd="batcat"
1145     fi
1146 
1147     "$cmd" \
1148         --style=plain,header,numbers --theme='Monokai Extended Light' \
1149         --wrap=never --color=always "$(which clam)" |
1150             sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' | less -JMKiCRS
1151 }
1152 
1153 # convert seconds into a colon-separated Hours-Minutes-Seconds triple
1154 hms() {
1155     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | awk '/./ {
1156         x = $0
1157         h = (x - x % 3600) / 3600
1158         m = (x % 3600) / 60
1159         s = x % 60
1160         printf "%02d:%02d:%05.2f\n", h, m, s; fflush()
1161     }'
1162 }
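
# illustrative example for `hms`: `hms 3661.5` emits 01:01:01.50, since
# 3661.5 seconds are 1 hour, 1 minute, and 1.5 seconds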
1163 
1164 # find all hyperlinks inside HREF attributes in the input text
1165 href() {
1166     awk '
1167         BEGIN { e = "href=\"[^\"]+\"" }
1168         {
1169             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1170                 print substr(s, RSTART + 6, RLENGTH - 7); fflush()
1171             }
1172         }
1173     ' "$@"
1174 }
1175 
1176 # Index all lines starting from 0, using a tab right after each line number
1177 # i() {
1178 #     local start="${1:-0}"
1179 #     [ $# -gt 0 ] && shift
1180 #     nl -b a -w 1 -v "${start}" "$@"
1181 # }
1182 
1183 # Index all lines starting from 0, using a tab right after each line number
1184 i() { stdbuf -oL nl -b a -w 1 -v 0 "$@"; }
1185 
1186 # avoid/ignore lines which case-insensitively match any of the regexes given
1187 iavoid() {
1188     awk '
1189         BEGIN {
1190             if (IGNORECASE == "") {
1191                 m = "this variant of AWK lacks case-insensitive regex-matching"
1192                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1193                 exit 125
1194             }
1195             IGNORECASE = 1
1196 
1197             for (i = 1; i < ARGC; i++) {
1198                 e[i] = ARGV[i]
1199                 delete ARGV[i]
1200             }
1201         }
1202 
1203         {
1204             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
1205             print; fflush(); got++
1206         }
1207 
1208         END { exit(got == 0) }
1209     ' "${@:-^\r?$}"
1210 }
1211 
1212 # case-Insensitively DEDUPlicate prevents lines from appearing more than once
1213 idedup() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1214 
1215 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1216 idrop() {
1217     awk '
1218         BEGIN {
1219             if (IGNORECASE == "") {
1220                 m = "this variant of AWK lacks case-insensitive regex-matching"
1221                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1222                 exit 125
1223             }
1224             IGNORECASE = 1
1225 
1226             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1227         }
1228 
1229         {
1230             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1231             print; fflush()
1232         }
1233     ' "${@:-\r$}"
1234 }
1235 
1236 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1237 ierase() {
1238     awk '
1239         BEGIN {
1240             if (IGNORECASE == "") {
1241                 m = "this variant of AWK lacks case-insensitive regex-matching"
1242                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1243                 exit 125
1244             }
1245             IGNORECASE = 1
1246 
1247             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1248         }
1249 
1250         {
1251             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1252             print; fflush()
1253         }
1254     ' "${@:-\r$}"
1255 }
1256 
1257 # ignore a command in a pipe: this allows quick re-editing of pipes, while
1258 # still leaving signs of previously-used steps, as a memo
1259 ignore() { cat; }
1260 
1261 # only keep lines which case-insensitively match any of the regexes given
1262 imatch() {
1263     awk '
1264         BEGIN {
1265             if (IGNORECASE == "") {
1266                 m = "this variant of AWK lacks case-insensitive regex-matching"
1267                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1268                 exit 125
1269             }
1270             IGNORECASE = 1
1271 
1272             for (i = 1; i < ARGC; i++) {
1273                 e[i] = ARGV[i]
1274                 delete ARGV[i]
1275             }
1276         }
1277 
1278         {
1279             for (i = 1; i < ARGC; i++) {
1280                 if ($0 ~ e[i]) {
1281                     print; fflush()
1282                     got++
1283                     next
1284                 }
1285             }
1286         }
1287 
1288         END { exit(got == 0) }
1289     ' "${@:-[^\r]}"
1290 }
1291 
1292 # start each non-empty line with n extra spaces
1293 indent() {
1294     awk '
1295         BEGIN {
1296             n = ARGV[1] + 0
1297             delete ARGV[1]
1298             fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0)
1299         }
1300 
1301         /^\r?$/ { print ""; fflush(); next }
1302         { gsub(/\r$/, ""); printf(fmt, "", $0); fflush() }
1303     ' "$@"
1304 }
1305 
1306 # listen to INTENSE streaming radio
1307 intense() {
1308     printf "streaming \e[7mIntense Radio\e[0m\n"
1309     mpv --quiet https://secure.live-streams.nl/flac.flac
1310 }
1311 
1312 # show public-IP-related INFOrmation
1313 # ipinfo() { curl -s ipinfo.io; }
1314 
1315 # show public-IP-related INFOrmation
1316 ipinfo() { curl -s ipinfo.io | jq; }
1317 
1318 # emit each word-like item from each input line on its own line; when a file
1319 # has tabs on its first line, items are split using tabs alone, which allows
1320 # items to have spaces in them
1321 items() {
1322     awk '
1323         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
1324         { gsub(/\r$/, ""); for (i = 1; i <= NF; i++) print $i; fflush() }
1325     ' "$@"
1326 }
1327 
1328 # case-insensitively deduplicate lines, keeping them in their original order:
1329 # the checking/matching is case-insensitive, but each first match is output
1330 # exactly as is
1331 iunique() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1332 
1333 # shrink/compact Json data, allowing an optional filepath
1334 # j0() { python -m json.tool --compact "${1:--}"; }
1335 
1336 # shrink/compact Json using the `jq` app, allowing an optional filepath, and
1337 # even an optional transformation formula after that
1338 # j0() { jq -c -M "${2:-.}" "${1:--}"; }
1339 
1340 # show Json data on multiple lines, using 2 spaces for each indentation level,
1341 # allowing an optional filepath
1342 # j2() { python -m json.tool --indent 2 "${1:--}"; }
1343 
1344 # show Json data on multiple lines, using 2 spaces for each indentation level,
1345 # allowing an optional filepath, and even an optional transformation formula
1346 # after that
1347 # j2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1348 
1349 # listen to streaming JAZZ music
1350 jazz() {
1351     printf "streaming \e[7mSmooth Jazz Instrumental\e[0m\n"
1352     # mpv https://stream.zeno.fm/00rt0rdm7k8uv
1353     mpv --quiet https://stream.zeno.fm/00rt0rdm7k8uv
1354 }
1355 
1356 # show a `dad` JOKE from the web, sometimes even a very funny one
1357 # joke() {
1358 #     curl -s https://icanhazdadjoke.com | fold -s | sed -E 's- *\r?$--'
1359 #     # plain-text output from previous cmd doesn't end with a line-feed
1360 #     printf "\n"
1361 # }
1362 
1363 # show a `dad` JOKE from the web, sometimes even a very funny one
1364 joke() {
1365     curl --show-error -s https://icanhazdadjoke.com | fold -s |
1366         awk '{ gsub(/ *\r?$/, ""); print }'
1367 }
1368 
1369 # shrink/compact JSON data, allowing an optional filepath
1370 # json0() { python -m json.tool --compact "${1:--}"; }
1371 
1372 # shrink/compact JSON using the `jq` app, allowing an optional filepath, and
1373 # even an optional transformation formula after that
1374 json0() { jq -c -M "${2:-.}" "${1:--}"; }
1375 
1376 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1377 # allowing an optional filepath
1378 # json2() { python -m json.tool --indent 2 "${1:--}"; }
1379 
1380 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1381 # allowing an optional filepath, and even an optional transformation formula
1382 # after that
1383 json2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1384 
1385 # turn JSON Lines into a proper JSON array
1386 jsonl2json() { jq -s -M "${@:-.}"; }
1387 
1388 # emit the given number of random/junk bytes, or 1024 junk bytes by default
1389 junk() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" /dev/urandom; }
1390 
1391 # only keep the file-extension part from lines ending with file-extensions
1392 # justext() {
1393 #     awk '
1394 #         !/^\./ && /\./ { gsub(/^.+\.+/, ""); printf ".%s\n", $0; fflush() }
1395 #     ' "$@"
1396 # }
1397 
1398 # only keep the file-extension part from lines ending with file-extensions
1399 justext() {
1400     awk '
1401         !/^\./ && /\./ {
1402             if (match($0, /((\.[A-Za-z0-9]+)+) *\r?$/)) {
1403                 print substr($0, RSTART, RLENGTH); fflush()
1404             }
1405         }
1406     ' "$@"
1407 }
1408 
1409 # only keep lines ending with a file-extension of any popular picture format
1410 justpictures() {
1411     awk '
1412         /.\.(bmp|gif|heic|ico|jfif|jpe?g|png|svg|tiff?|webp) *\r?$/ {
1413             gsub(/ *\r?$/, ""); print; fflush()
1414         }
1415     ' "$@"
1416 }
1417 
1418 # only keep lines ending with a file-extension of any popular sound format
1419 justsounds() {
1420     awk '
1421         /.\.(aac|aif[cf]?|au|flac|m4a|m4b|mp[23]|ogg|snd|wav|wma) *\r?$/ {
1422             gsub(/ *\r?$/, ""); print; fflush()
1423         }
1424     ' "$@"
1425 }
1426 
1427 # only keep lines ending with a file-extension of any popular video format
1428 justvideos() {
1429     awk '
1430         /.\.(avi|mkv|mov|mp4|mpe?g|ogv|webm|wmv) *\r?$/ {
1431             gsub(/ *\r?$/, ""); print; fflush()
1432         }
1433     ' "$@"
1434 }
1435 
1436 # convert binary KiloBytes into bytes
1437 kb() {
1438     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1439         awk '/./ { printf "%.2f\n", 1024 * $0; fflush() }' |
1440         sed 's-\.00*$--'
1441 }
1442 
1443 # run `less`, showing line numbers, among other settings
1444 l() { less -JMKNiCRS "$@"; }
1445 
1446 # Like A Book groups lines as 2 side-by-side pages, the same way books
1447 # do it; uses my script `book`
1448 lab() { book "$(($(tput lines) - 1))" "$@" | less -JMKiCRS; }
1449 
1450 # find the LAN (local-area network) IP address for this device
1451 lanip() { hostname -I; }
1452 
1453 # Line xARGS: `xargs` using line separators, which handles filepaths
1454 # with spaces, as long as the standard input has 1 path per line
1455 # largs() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
1456 
1457 # Line xARGS: `xargs` using line separators, which handles filepaths
1458 # with spaces, as long as the standard input has 1 path per line
1459 largs() {
1460     awk -v ORS='\000' '
1461         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1462         { gsub(/\r$/, ""); print; fflush() }
1463     ' | xargs -0 "$@"
1464 }
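
# illustrative example for `largs`: count the bytes in each file found, even
# when filepaths have spaces in them, using the `files` command defined in
# this script
#
#   files | largs wc -c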
1465 
1466 # get the last n lines, or 1 by default
1467 last() { tail -n "${1:-1}" "${2:--}"; }
1468 
1469 # get up to the last given number of bytes
1470 lastbytes() { tail -c "${1:-1}" "${2:--}"; }
1471 
1472 # get the last n lines, or 1 by default
1473 lastlines() { tail -n "${1:-1}" "${2:--}"; }
1474 
1475 # turn UTF-8 into its latin-like subset, where variants of latin letters stay
1476 # as given, and where all other symbols become question marks, one question
1477 # mark for each code-point byte
1478 latinize() {
1479     iconv -f utf-8 -t latin-1//translit "$@" | iconv -f latin-1 -t utf-8
1480 }
1481 
1482 # Lowercased (lines) AWK
1483 lawk() {
1484     local code="${1:-1}"
1485     [ $# -gt 0 ] && shift
1486     awk "
1487         {
1488             line = orig = original = \$0
1489             low = lower = tolower(\$0)
1490             \$0 = lower
1491         }
1492         ${code}
1493         { fflush() }
1494     " "$@";
1495 }
1496 
1497 # convert pounds (LB) into kilograms
1498 lb() {
1499     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1500         awk '/./ { printf "%.2f\n", 0.45359237 * $0; fflush() }'
1501 }
1502 
1503 # turn the first n space-separated fields on each line into tab-separated
1504 # ones; this behavior is useful to make the output of many cmd-line tools
1505 # into TSV, since filenames are usually the last fields, and these may
1506 # contain spaces which aren't meant to be split into different fields
1507 leadtabs() {
1508     local n="$1"
1509     local cmd
1510     cmd="$([ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "")"
1511     cmd="s-^ *--; s- *\\r?\$--; $(echo "${cmd}" | sed 's/ /s- +-\\t-1;/g')"
1512     sed -u -E "${cmd}"
1513 }
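
# illustrative example for `leadtabs`: make the first 8 space-separated fields
# from `ls -l` tab-separated, leaving any spaces in filenames alone
#
#   ls -l | leadtabs 8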
1514 
1515 # run `less`, showing line numbers, among other settings
1516 least() { less -JMKNiCRS "$@"; }
1517 
1518 # limit stops at the first n bytes, or 1024 bytes by default
1519 limit() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" "${2:--}"; }
1520 
1521 # Less with Header runs `less` with line numbers, ANSI styles, no line-wraps,
1522 # and using the first n lines as a sticky-header (1 by default), so they
1523 # always show on top
1524 lh() {
1525     local n="${1:-1}"
1526     [ $# -gt 0 ] && shift
1527     less --header="$n" -JMKNiCRS "$@"
1528 }
1529 
1530 # fix lines, ignoring leading UTF-8 BOMs (byte-order marks) on each input's
1531 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
1532 # and ensuring each input's last line ends with a line-feed
1533 lines() {
1534     awk '
1535         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1536         { gsub(/\r$/, ""); print; fflush() }
1537     ' "$@"
1538 }
1539 
1540 # regroup adjacent lines into n-item tab-separated lines
1541 lineup() {
1542     local n="${1:-0}"
1543     [ $# -gt 0 ] && shift
1544 
1545     if [ "$n" -le 0 ]; then
1546         awk '
1547             NR > 1 { printf "\t" }
1548             { printf "%s", $0; fflush() }
1549             END { if (NR > 0) print "" }
1550         ' "$@"
1551         return $?
1552     fi
1553 
1554     awk -v n="$n" '
1555         NR % n != 1 && n > 1 { printf "\t" }
1556         { printf "%s", $0; fflush() }
1557         NR % n == 0 { print ""; fflush() }
1558         END { if (NR % n != 0) print "" }
1559     ' "$@"
1560 }
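
# illustrative example for `lineup`: regroup 6 input lines into 2 lines of 3
# tab-separated items each
#
#   seq 6 | lineup 3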
1561 
1562 # find all hyperLINKS (https:// and http://) in the input text
1563 links() {
1564     awk '
1565         BEGIN { e = "https?://[A-Za-z0-9+_.:%-]+(/[A-Za-z0-9+_.%/,#?&=-]*)*" }
1566         {
1567             # match all links in the current line
1568             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1569                 print substr(s, RSTART, RLENGTH); fflush()
1570             }
1571         }
1572     ' "$@"
1573 }
1574 
1575 # List files, using the `Long` option
1576 # ll() { ls -l "$@"; }
1577 
1578 # LOAD data from the filename or URI given; uses my script `get`
1579 load() { get "$@"; }
1580 
1581 # LOwercase line, check (awk) COndition: on each success, the original line
1582 # is output with its original letter-casing, as its lower-cased version is
1583 # only a convenience meant for the condition
1584 loco() {
1585     local cond="${1:-1}"
1586     [ $# -gt 0 ] && shift
1587     awk "
1588         {
1589             line = orig = original = \$0
1590             low = lower = tolower(\$0)
1591             \$0 = lower
1592         }
1593         ${cond} { print line; fflush() }
1594     " "$@"
1595 }
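
# illustrative example for `loco`, assuming a (hypothetical) file `app.log`:
# match lines case-insensitively via their lowercased copies, while emitting
# the original lines untouched
#
#   loco '/error/' < app.log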
1596 
1597 # LOcal SERver serves the files in a folder over HTTP on localhost, using
1598 # the port number given, or port 8080 by default
1599 loser() {
1600     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
1601     python3 -m http.server "${1:-8080}" -d "${2:-.}"
1602 }
1603 
1604 # LOWercase all ASCII symbols
1605 low() { awk '{ print tolower($0); fflush() }' "$@"; }
1606 
1607 # LOWERcase all ASCII symbols
1608 lower() { awk '{ print tolower($0); fflush() }' "$@"; }
1609 
1610 # Live/Line-buffered RipGrep ensures results show/pipe up immediately
1611 lrg() { rg --line-buffered "$@"; }
1612 
1613 # Listen To Youtube
1614 lty() {
1615     local url
1616     # some youtube URIs end with extra playlist/tracker parameters
1617     url="$(echo "$1" | sed 's-&.*--')"
1618     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
1619 }
1620 
1621 # only keep lines which match any of the regexes given
1622 match() {
1623     awk '
1624         BEGIN {
1625             for (i = 1; i < ARGC; i++) {
1626                 e[i] = ARGV[i]
1627                 delete ARGV[i]
1628             }
1629         }
1630 
1631         {
1632             for (i = 1; i < ARGC; i++) {
1633                 if ($0 ~ e[i]) {
1634                     print; fflush()
1635                     got++
1636                     next
1637                 }
1638             }
1639         }
1640 
1641         END { exit(got == 0) }
1642     ' "${@:-[^\r]}"
1643 }
1644 
# MAX Width truncates lines to the number of symbols/bytes given, or to 80 by
# default; output lines end with an ANSI reset-code, in case input lines use
# ANSI styles
1648 maxw() {
1649     local maxwidth="${1:-80}"
1650     [ $# -gt 0 ] && shift
1651     awk -v maxw="${maxwidth}" '
1652         {
1653             gsub(/\r$/, "")
1654             printf("%s\x1b[0m\n", substr($0, 1, maxw)); fflush()
1655         }
1656     ' "$@"
1657 }
1658 
1659 # convert binary MegaBytes into bytes
1660 mb() {
1661     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1662         awk '/./ { printf "%.2f\n", 1048576 * $0; fflush() }' |
1663         sed 's-\.00*$--'
1664 }
1665 
1666 # Multi-Core MAKE runs `make` using all cores
1667 mcmake() { make -j "$(nproc)" "$@"; }
1668 
1669 # Multi-Core MaKe runs `make` using all cores
1670 mcmk() { make -j "$(nproc)" "$@"; }
1671 
1672 # merge stderr into stdout, without any ugly keyboard-dancing
1673 # merrge() { "$@" 2>&1; }
1674 
1675 # convert MIles into kilometers
1676 mi() {
1677     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1678         awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }'
1679 }
1680 
1681 # convert MIles² (squared) into kilometers²
1682 mi2() {
1683     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1684         awk '/./ { printf "%.2f\n", 2.5899881103360 * $0 }'
1685 }
1686 
1687 # Make In Folder
1688 mif() {
1689     local code
1690     pushd "${1:-.}" > /dev/null || return
1691     [ $# -gt 0 ] && shift
1692     make "$@"
1693     code=$?
1694     popd > /dev/null || return "${code}"
1695     return "${code}"
1696 }
1697 
1698 # Media INFO
1699 # minfo() { mediainfo "$@" | less -JMKiCRS; }
1700 
1701 # Media INFO
1702 # minfo() { ffprobe "$@" |& less -JMKiCRS; }
1703 
1704 # run `make`
1705 mk() { make "$@"; }
1706 
1707 # convert Miles Per Hour into kilometers per hour
1708 mph() {
1709     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1710         awk '/./ { printf "%.2f\n", 1.609344 * $0 }'
1711 }
1712 
1713 # Number all lines, using a tab right after each line number
1714 # n() {
1715 #     local start="${1:-1}"
1716 #     [ $# -gt 0 ] && shift
1717 #     nl -b a -w 1 -v "${start}" "$@"
1718 # }
1719 
1720 # Number all lines, using a tab right after each line number
1721 n() { stdbuf -oL nl -b a -w 1 -v 1 "$@"; }
1722 
1723 # NArrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1724 naman() {
1725     local w
1726     w="$(tput cols)"
1727     if [ "$w" -gt 100 ]; then
1728         w="$((w / 2 - 1))"
1729     fi
1730     MANWIDTH="$w" man "$@"
1731 }
1732 
1733 # Not AND sorts its 2 inputs, then finds lines not in common
1734 nand() {
1735     # comm -3 <(sort "$1") <(sort "$2")
1736     # dash doesn't support the process-sub syntax
1737     (sort "$1" | (sort "$2" | (comm -3 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
1738 }
1739 
1740 # Nice Byte Count, using my scripts `nn` and `cext`
1741 nbc() { wc -c "$@" | nn --gray | cext; }
1742 
1743 # listen to streaming NEW WAVE music
1744 newwave() {
1745     printf "streaming \e[7mNew Wave radio\e[0m\n"
1746     mpv --quiet https://puma.streemlion.com:2910/stream
1747 }
1748 
# NIce(r) COlumns numbers and visually groups lines, making the output of
# commands which start with a header line easier to read; uses my script `nn`
1751 nico() {
1752     awk '
1753         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1754         { printf "%5d  %s\n", NR - 1, $0; fflush() }
1755     ' "$@" | nn --gray | less -JMKiCRS
1756 }
1757 
1758 # emit nothing to output and/or discard everything from input
1759 nil() {
1760     if [ $# -gt 0 ]; then
1761         "$@" > /dev/null
1762     else
1763         cat < /dev/null
1764     fi
1765 }
1766 
1767 # pipe-run my scripts `nj` (Nice Json) and `nn` (Nice Numbers)
1768 njnn() { nj "$@" | nn --gray; }
1769 
1770 # Narrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1771 nman() {
1772     local w
1773     w="$(tput cols)"
1774     if [ "$w" -gt 100 ]; then
1775         w="$((w / 2 - 1))"
1776     fi
1777     MANWIDTH="$w" man "$@"
1778 }
1779 
1780 # convert Nautical MIles into kilometers
1781 nmi() {
1782     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1783         awk '/./ { printf "%.2f\n", 1.852 * $0; fflush() }'
1784 }
1785 
1786 # NO (standard) ERRor ignores stderr, without any ugly keyboard-dancing
1787 # noerr() { "$@" 2> /dev/null; }
1788 
1789 # play a white-noise sound lasting the number of seconds given, or for 1
1790 # second by default; uses my script `waveout`
1791 noise() { waveout "${1:-1}" "${2:-0.05} * random()" | mpv --really-quiet -; }
1792 
1793 # ignore trailing spaces, as well as trailing carriage returns
1794 notrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
1795 
1796 # show the current date and time
1797 now() { date +'%Y-%m-%d %H:%M:%S'; }
1798 
1799 # Nice Processes shows/lists all current processes; uses my script `nn`
1800 np() {
1801     local res
1802     local code
1803     # res="$(ps "${@:-auxf}")"
1804     res="$(ps "${@:-aux}")"
1805     code=$?
1806     if [ "${code}" -ne 0 ]; then
1807         return "${code}"
1808     fi
1809 
1810     echo "${res}" | awk '
1811         BEGIN {
1812             d = strftime("%a %b %d")
1813             t = strftime("%H:%M:%S")
1814             printf "\x1b[7m%30s%s  %s%30s\x1b[0m\n\n", "", d, t, ""
1815         }
1816 
1817         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1818 
1819         $1 == "root" {
1820             gsub(/^/, "\x1b[38;2;52;101;164m")
1821             gsub(/ +/, "&\x1b[0m\x1b[38;2;52;101;164m")
1822             gsub(/$/, "\x1b[0m")
1823         }
1824 
1825         {
1826             gsub(/ \? /, "\x1b[38;2;135;135;175m&\x1b[0m")
1827             gsub(/0[:\.]00*/, "\x1b[38;2;135;135;175m&\x1b[0m")
1828             printf "%3d  %s\n", NR - 1, $0
1829         }
1830     ' | nn --gray | less -JMKiCRS
1831 }
1832 
1833 # Nice Size, using my scripts `nn` and `cext`
1834 ns() { wc -c "$@" | nn --gray | cext; }
1835 
1836 # Nice SystemCtl Status
1837 nscs() {
1838     systemctl status "$@" 2>&1 | sed 's-\x1b\[[^A-Za-z][A-Za-z]--g' | sed -E \
1839         -e 's-(^[^ ] )([a-z0-9-]+\.service)-\1\x1b[7m\2\x1b[0m-' \
1840         -e 's- (enabled)- \x1b[38;2;0;135;95m\1\x1b[0m-g' \
1841         -e 's- (disabled)- \x1b[38;2;215;95;0m\1\x1b[0m-g' \
1842         -e 's- (active \(running\))- \x1b[38;2;0;135;95m\1\x1b[0m-g' \
1843         -e 's- (inactive \(dead\))- \x1b[38;2;204;0;0m\1\x1b[0m-g' \
1844         -e 's-^(Unit .* could not .*)$-\x1b[38;2;204;0;0m\x1b[7m\1\x1b[0m\n-' |
1845             less -JMKiCRS
1846 }
1847 
1848 # Nice Systemctl Status
1849 nss() {
1850     systemctl status "$@" 2>&1 | sed 's-\x1b\[[^A-Za-z][A-Za-z]--g' | sed -E \
1851         -e 's-(^[^ ] )([a-z0-9-]+\.service)-\1\x1b[7m\2\x1b[0m-' \
1852         -e 's- (enabled)- \x1b[38;2;0;135;95m\1\x1b[0m-g' \
1853         -e 's- (disabled)- \x1b[38;2;215;95;0m\1\x1b[0m-g' \
1854         -e 's- (active \(running\))- \x1b[38;2;0;135;95m\1\x1b[0m-g' \
1855         -e 's- (inactive \(dead\))- \x1b[38;2;204;0;0m\1\x1b[0m-g' \
1856         -e 's-^(Unit .* could not .*)$-\x1b[38;2;204;0;0m\x1b[7m\1\x1b[0m\n-' |
1857             less -JMKiCRS
1858 }
1859 
# Nice Transform Json, using my scripts `tj` and `nj`
1861 ntj() { tj "$@" | nj; }
1862 
1863 # Nice TimeStamp
1864 nts() {
1865     ts '%Y-%m-%d %H:%M:%S' |
1866         sed -u 's-^-\x1b[48;2;218;218;218m\x1b[38;2;0;95;153m-; s- -\x1b[0m\t-2'
1867 }
1868 
1869 # emit nothing to output and/or discard everything from input
1870 null() {
1871     if [ $# -gt 0 ]; then
1872         "$@" > /dev/null
1873     else
1874         cat < /dev/null
1875     fi
1876 }
1877 
1878 # NULl-terminate LINES ends each stdin line with a null byte, instead of a
1879 # line-feed byte
1880 nullines() {
1881     awk -v ORS='\000' '
1882         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1883         { gsub(/\r$/, ""); print; fflush() }
1884     ' "$@"
1885 }
1886 
1887 # (Nice) What Are These (?) shows what the names given to it are/do, coloring
1888 # the syntax of shell functions
1889 nwat() {
1890     local a
1891     local gap=0
1892 
1893     if [ $# -eq 0 ]; then
1894         printf "\e[38;2;204;0;0mnwat: no names given\e[0m\n" > /dev/stderr
1895         return 1
1896     fi
1897 
1898     local cmd="bat"
1899     # debian linux uses a different name for the `bat` app
1900     if [ -e "/usr/bin/batcat" ]; then
1901         cmd="batcat"
1902     fi
1903 
1904     for a in "$@"; do
1905         [ "${gap}" -gt 0 ] && printf "\n"
1906         gap=1
1907         # printf "\e[7m%-80s\e[0m\n" "$a"
1908         printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
1909 
1910         # resolve 1 alias level
1911         if alias "$a" 2> /dev/null > /dev/null; then
1912             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
1913         fi
1914 
1915         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
1916             # resolved aliases with args/spaces in them would otherwise fail
1917             echo "$a"
1918         elif whence -f "$a" > /dev/null 2> /dev/null; then
1919             # zsh seems to show a shell function's code only via `whence -f`
1920             whence -f "$a"
1921         elif type "$a" > /dev/null 2> /dev/null; then
1922             # dash doesn't support `declare`, and `type` in bash emits
1923             # a redundant first output line, when it's a shell function
1924             type "$a" | awk '
1925                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
1926                 { print; fflush() }
1927                 END { if (NR < 2 && skipped) print skipped }
1928             ' | "$cmd" -l sh --style=plain --theme='Monokai Extended Light' \
1929                 --wrap=never --color=always |
1930                     sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g'
1931         else
1932             printf "\e[38;2;204;0;0m%s not found\e[0m\n" "$a"
1933         fi
1934     done | less -JMKiCRS
1935 }
1936 
1937 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1938 # alternating styles to make long numbers easier to read
1939 # nwc() { wc "$@" | nn --gray; }
1940 
1941 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1942 # alternating styles to make long numbers easier to read
1943 # nwc() { wc "$@" | nn --gray | awk '{ printf "%5d %s\n", NR, $0; fflush() }'; }
1944 
1945 # Nice Word-Count runs `wc` and colors results, using my scripts `nn` and
1946 # `cext`, alternating styles to make long numbers easier to read
1947 nwc() {
1948     wc "$@" | sort -rn | nn --gray | cext |
1949         awk '{ printf "%5d %s\n", NR - 1, $0; fflush() }'
1950 }
1951 
1952 # Nice Weather Forecast
1953 nwf() {
1954     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
1955     curl --show-error -s telnet://graph.no:79 |
1956     sed -E \
1957         -e 's/ *\r?$//' \
1958         -e '/^\[/d' \
1959         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
1960         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
1961         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
1962         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
1963         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
1964         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
1965         -e 's/\*/○/g' |
1966     awk 1 |
1967     less -JMKiCRS
1968 }
1969 
# Nice Zoom Json, using my scripts `zj` and `nj`
1971 nzj() { zj "$@" | nj; }
1972 
1973 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1974 # pawk() { awk -F='' -v RS='' "$@"; }
1975 
1976 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1977 pawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
1978 
1979 # Plain `fd`
1980 pfd() { fd --color=never "$@"; }
1981 
1982 # pick lines, using all the 1-based line-numbers given
1983 picklines() {
1984     awk '
1985         BEGIN { m = ARGC - 1; if (ARGC == 1) exit 0 }
1986         BEGIN { for (i = 1; i <= m; i++) { p[i] = ARGV[i]; delete ARGV[i] } }
1987         { l[++n] = $0 }
1988         END {
1989             for (i = 1; i <= m; i++) {
1990                 j = p[i]
1991                 if (j < 0) j += NR + 1
1992                 if (0 < j && j <= NR) print l[j]
1993             }
1994         }
1995     ' "$@"
1996 }
1997 
1998 # Plain Interactive Grep
1999 pig() { ugrep --color=never -Q -E "$@"; }
2000 
2001 # make text plain, by ignoring ANSI terminal styling
2002 plain() {
2003     awk '
2004         {
2005             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
2006             gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences
2007             print; fflush()
2008         }
2009     ' "$@"
2010 }
2011 
2012 # end all lines with an ANSI-code to reset styles
2013 plainend() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
2014 
2015 # end all lines with an ANSI-code to reset styles
2016 plainends() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
2017 
2018 # play audio/video media
2019 # play() { mplayer -msglevel all=-1 "${@:--}"; }
2020 
2021 # play audio/video media
2022 play() { mpv "${@:--}"; }
2023 
2024 # Pick LINE, using the 1-based line-number given
2025 pline() {
2026     local line="$1"
2027     [ $# -gt 0 ] && shift
2028     awk -v n="${line}" '
2029         BEGIN { if (n < 1) exit 0 }
2030         NR == n { print; exit 0 }
2031     ' "$@"
2032 }
2033 
2034 # Paused MPV; especially useful when trying to view pictures via `mpv`
2035 pmpv() { mpv --pause "${@:--}"; }
2036 
2037 # Print Python result
2038 pp() { python -c "print($1)"; }
2039 
2040 # PRecede (input) ECHO, prepends a first line to stdin lines
2041 precho() { echo "$@" && cat /dev/stdin; }
2042 
2043 # PREcede (input) MEMO, prepends a first highlighted line to stdin lines
2044 prememo() {
2045     awk '
2046         BEGIN {
2047             if (ARGC > 1) printf "\x1b[7m"
2048             for (i = 1; i < ARGC; i++) {
2049                 if (i > 1) printf " "
2050                 printf "%s", ARGV[i]
2051                 delete ARGV[i]
2052             }
2053             if (ARGC > 1) printf "\x1b[0m\n"
2054             fflush()
2055         }
2056         { print; fflush() }
2057     ' "$@"
2058 }
2059 
2060 # start by joining all arguments given as a tab-separated-items line of output,
2061 # followed by all lines from stdin verbatim
2062 pretsv() {
2063     awk '
2064         BEGIN {
2065             for (i = 1; i < ARGC; i++) {
2066                 if (i > 1) printf "\t"
2067                 printf "%s", ARGV[i]
2068                 delete ARGV[i]
2069             }
2070             if (ARGC > 1) printf "\n"
2071             fflush()
2072         }
2073         { print; fflush() }
2074     ' "$@"
2075 }
2076 
2077 # Plain Recursive Interactive Grep
2078 prig() { ugrep --color=never -r -Q -E "$@"; }
2079 
2080 # show/list all current processes
2081 processes() {
2082     local res
2083     res="$(ps aux)"
2084     echo "${res}" | awk '!/ps aux$/' | sed -E \
2085         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' \
2086         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1'
2087 }
2088 
2089 # Play Youtube Audio
2090 pya() {
2091     local url
2092     # some youtube URIs end with extra playlist/tracker parameters
2093     url="$(echo "$1" | sed 's-&.*--')"
2094     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
2095 }
2096 
2097 # Quiet ignores stderr, without any ugly keyboard-dancing
2098 q() { "$@" 2> /dev/null; }
2099 
2100 # Quiet MPV
2101 qmpv() { mpv --quiet "${@:--}"; }
2102 
2103 # ignore stderr, without any ugly keyboard-dancing
2104 quiet() { "$@" 2> /dev/null; }
2105 
2106 # Reset the screen, which empties it and resets the current style
2107 r() { reset; }
2108 
2109 # keep only lines between the 2 line numbers given, inclusively
2110 rangelines() {
2111     { [ "$#" -eq 2 ] || [ "$#" -eq 3 ]; } && [ "${1}" -le "${2}" ] &&
2112         { tail -n +"${1:-1}" "${3:--}" | head -n "$(("${2}" - "${1}" + 1))"; }
2113 }
2114 
2115 # RANdom MANual page
2116 ranman() {
2117     find "/usr/share/man/man${1:-1}" -type f | shuf -n 1 | xargs basename |
2118         sed 's-\.gz$--' | xargs man
2119 }
2120 
2121 # Run AWK expression
2122 rawk() {
2123     local expr="${1:-0}"
2124     [ $# -gt 0 ] && shift
2125     awk "BEGIN { print ${expr}; exit }" "$@"
2126 }
2127 
2128 # play a ready-phone-line sound lasting the number of seconds given, or for 1
2129 # second by default; uses my script `waveout`
2130 ready() {
2131     local f='0.5 * sin(350*tau*t) + 0.5 * sin(450*tau*t)'
2132     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2133 }
2134 
2135 # reflow/trim lines of prose (text) to improve its legibility: it's especially
2136 # useful when the text is pasted from web-pages being viewed in reader mode
2137 reprose() {
2138     local w="${1:-80}"
2139     [ $# -gt 0 ] && shift
2140     awk '
2141         FNR == 1 && NR > 1 { print "" }
2142         { gsub(/\r$/, ""); print; fflush() }
2143     ' "$@" | fold -s -w "$w" | sed -u -E 's- *\r?$--'
2144 }
2145 
# ignore ANSI styles from stdin and restyle things using the style-name given;
2147 # uses my script `style`
2148 restyle() { style "$@"; }
2149 
2150 # change the tab-title on your terminal app
2151 retitle() { printf "\e]0;%s\a\n" "$*"; }
2152 
2153 # REVerse-order SIZE (byte-count)
2154 revsize() { wc -c "$@" | sort -rn; }
2155 
2156 # Run In Folder
2157 rif() {
2158     local code
2159     pushd "${1:-.}" > /dev/null || return
2160     [ $# -gt 0 ] && shift
2161     "$@"
2162     code=$?
2163     popd > /dev/null || return "${code}"
2164     return "${code}"
2165 }
2166 
2167 # play a ringtone-style sound lasting the number of seconds given, or for 1
2168 # second by default; uses my script `waveout`
2169 ringtone() {
2170     local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))'
2171     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2172 }
2173 
2174 # Read-Only Editor
2175 roe() { micro -readonly true "$@"; }
2176 
2177 # Read-Only Micro (text editor)
2178 rom() { micro -readonly true "$@"; }
2179 
# run the command given, trying to turn its output into TSV (tab-separated
# values); uses `jc`, along with my script `dejson`
2182 rtab() { jc "$@" | dejson; }
2183 
2184 # Right TRIM ignores trailing spaces, as well as trailing carriage returns
2185 rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2186 
2187 # show a RULER-like width-measuring line
2188 # ruler() {
2189 #     local n="${1:-$(tput cols)}"
2190 #     [ "${n}" -gt 0 ] && printf "%${n}s\n" "" |
2191 #         sed -E 's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2192 # }
2193 
2194 # show a RULER-like width-measuring line
2195 ruler() {
2196     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \
2197         's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2198 }
2199 
# run the command given, trying to turn its output into TSV (tab-separated
# values); uses `jc`, along with my script `dejson`
2202 runtab() { jc "$@" | dejson; }
2203 
# run the command given, trying to turn its output into TSV (tab-separated
# values); uses `jc`, along with my script `dejson`
2206 runtsv() { jc "$@" | dejson; }
2207 
2208 # Reverse-order WC
2209 rwc() { wc "$@" | sort -rn; }
2210 
2211 # extended-mode Sed, enabling its full regex syntax
2212 # s() { sed -E -u "$@"; }
2213 
2214 # Substitute using `sed`, enabling its full regex syntax
2215 s() { sed -E -u "$(printf "s\xff%s\xff%s\xffg" "$1" "$2")"; }
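# since the separator is a byte unlikely to show up in text (0xff), slashes
# need no escaping in either the regex or its replacement, for example:
#     echo '2024-01-02' | s '-' '/'    # emits `2024/01/02`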
2216 
2217 # Silent CURL spares you the progress bar, but still tells you about errors
2218 scurl() { curl --show-error -s "$@"; }
2219 
2220 # show a unique-looking SEParator line; useful to run between commands
2221 # which output walls of text
2222 sep() {
2223     [ "${1:-80}" -gt 0 ] &&
2224         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g'
2225 }
2226 
2227 # webSERVE files in a folder as localhost, using the port number given, or
2228 # port 8080 by default
2229 serve() {
2230     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
2231     python3 -m http.server "${1:-8080}" -d "${2:-.}"
2232 }
2233 
2234 # SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input
2235 setdiff() {
2236     # comm -23 <(sort "$1") <(sort "$2")
2237     # dash doesn't support the process-sub syntax
2238     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2239 }
2240 
2241 # SET INtersection, sorts its 2 inputs, then finds common lines
2242 setin() {
2243     # comm -12 <(sort "$1") <(sort "$2")
2244     # dash doesn't support the process-sub syntax
2245     (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2246 }
2247 
2248 # SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input
2249 setsub() {
2250     # comm -23 <(sort "$1") <(sort "$2")
2251     # dash doesn't support the process-sub syntax
2252     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2253 }
2254 
2255 # Show Files (and folders), coloring folders and links; uses my script `nn`
2256 sf() {
2257     local arg
2258     local gap=0
2259 
2260     for arg in "${@:-.}"; do
2261         [ "${gap}" -gt 0 ] && printf "\n"
2262         printf "\e[7m%s\e[0m\n\n" "$(realpath "${arg}")"
2263         gap=1
2264 
2265         ls -al --file-type --color=never --time-style iso "${arg}" | awk '
2266             BEGIN {
2267                 drep = "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m"
2268                 lrep = "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m"
2269             }
2270 
2271             (NR - 1) % 5 == 1 && NR > 1 { print "" }
2272 
2273             {
2274                 gsub(/^(d[rwx-]+)/, drep)
2275                 gsub(/^(l[rwx-]+)/, lrep)
2276                 printf "%6d  %s\n", NR - 1, $0; fflush()
2277             }
2278         '
2279     done | nn --gray | less -JMKiCRS
2280 }
2281 
2282 # Show Files (and folders) Plus, by coloring folders, links, and extensions;
2283 # uses my scripts `nn` and `cext`
2284 sfp() {
2285     local arg
2286     local gap=0
2287 
2288     for arg in "${@:-.}"; do
2289         [ "${gap}" -gt 0 ] && printf "\n"
2290         printf "\e[7m%s\e[0m\n\n" "$(realpath "${arg}")"
2291         gap=1
2292 
2293         ls -al --file-type --color=never --time-style iso "${arg}" | awk '
2294             BEGIN {
2295                 drep = "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m"
2296                 lrep = "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m"
2297             }
2298 
2299             (NR - 1) % 5 == 1 && NR > 1 { print "" }
2300 
2301             {
2302                 gsub(/^(d[rwx-]+)/, drep)
2303                 gsub(/^(l[rwx-]+)/, lrep)
2304                 printf "%6d  %s\n", NR - 1, $0; fflush()
2305             }
2306         '
2307     done | nn --gray | cext | less -JMKiCRS
2308 }
2309 
2310 # Show File Sizes, using my scripts `nn` and `cext`
2311 sfs() {
2312     # turn arg-list into single-item lines
2313     printf "%s\x00" "$@" |
2314     # calculate file-sizes, and reverse-sort results
2315     xargs -0 wc -c | sort -rn |
2316     # add/realign fields to improve legibility
2317     awk '
2318         # start output with a header-like line, and add a MiB field
2319         BEGIN { printf "%6s  %10s  %8s  name\n", "n", "bytes", "MiB"; fflush() }
2320         # make table breathe with empty lines, so tall outputs are readable
2321         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2322         # emit regular output lines
2323         {
2324             printf "%6d  %10d  %8.2f  ", NR - 1, $1, $1 / 1048576
2325             # first field is likely space-padded
2326             gsub(/^ */, "")
2327             # slice line after the first field, as filepaths can have spaces
2328             $0 = substr($0, length($1) + 1)
2329             # first field is likely space-padded
2330             gsub(/^ /, "")
2331             printf "%s\n", $0; fflush()
2332         }
2333     ' |
2334     # make zeros in the MiB field stand out with a special color
2335     awk '
2336         {
2337             gsub(/ 00*\.00* /, "\x1b[38;2;135;135;175m&\x1b[0m")
2338             print; fflush()
2339         }
2340     ' |
2341     # make numbers nice, alternating styles along 3-digit groups
2342     nn --gray |
2343     # color-code file extensions
2344     cext |
2345     # make result interactively browsable
2346     less -JMKiCRS
2347 }
2348 
2349 # SHell-run AWK output
2350 # shawk() { stdbuf -oL awk "$@" | sh; }
2351 
# time/benchmark the commands given one-per-line from stdin, appending to each
# any extra common arguments given explicitly; uses `hyperfine`
2354 showdown() {
2355     awk '
2356         BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } }
2357         {
2358             printf "%s", $0
2359             for (i = 1; i < ARGC; i++) printf " %s", a[i]
2360             printf "\x00"; fflush()
2361         }
2362     ' "$@" | xargs -0 hyperfine --style full
2363 }
2364 
2365 # SHOW a command, then RUN it
2366 showrun() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2367 
# SHell-QUOTE each line from the input(s): this is useful to make lines holding
# single filepaths compatible with `xargs`, since standard shell word-splitting
# gets in the way of filepaths with spaces and other special symbols in them
2371 shquote() {
2372     awk '
2373         {
2374             s = $0
2375             gsub(/\r$/, "", s)
2376             gsub(/\\/, "\\\\", s)
2377             gsub(/"/, "\\\"", s)
2378             gsub(/`/, "\\`", s)
2379             gsub(/\$/, "\\$", s)
2380             printf "\"%s\"\n", s; fflush()
2381         }
2382     ' "$@"
2383 }
2384 
# run the command given using the terminal's alternate screen, so its output
# is cleaned off the screen once it finishes
2386 sideshow() {
2387     local res
2388     tput smcup
2389     "$@"
2390     res=$?
2391     tput rmcup
2392     return "${res}"
2393 }
2394 
2395 # skip the first n lines, or the 1st line by default
2396 skip() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2397 
2398 # skip the first n bytes
2399 skipbytes() { tail -c +$(("$1" + 1)) "${2:--}"; }
2400 
2401 # skip the last n lines, or the last line by default
2402 skiplast() { head -n -"${1:-1}" "${2:--}"; }
2403 
2404 # skip the last n bytes
2405 skiplastbytes() { head -c -"$1" "${2:--}"; }
2406 
2407 # skip the last n lines, or the last line by default
2408 skiplastlines() { head -n -"${1:-1}" "${2:--}"; }
2409 
2410 # skip the first n lines, or the 1st line by default
2411 skiplines() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2412 
2413 # SLOW/delay lines from the standard-input, waiting the number of seconds
2414 # given for each line, or waiting 1 second by default
2415 slow() {
2416     local seconds="${1:-1}"
2417     (
2418         IFS="$(printf "\n")"
2419         while read -r line; do
2420             sleep "${seconds}"
2421             printf "%s\n" "${line}"
2422         done
2423     )
2424 }
2425 
2426 # Show Latest Podcasts, using my scripts `podfeed` and `si`
2427 slp() {
2428     local title
2429     title="Latest Podcast Episodes as of $(date +'%F %T')"
2430     podfeed -title "${title}" "$@" | si
2431 }
2432 
2433 # recursively find all files with fewer bytes than the number given
2434 smallfiles() {
2435     local n
2436     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
2437     [ $# -gt 0 ] && shift
2438 
2439     local arg
2440     for arg in "${@:-.}"; do
2441         if [ ! -d "${arg}" ]; then
2442             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2443             return 1
2444         fi
2445         stdbuf -oL find "${arg}" -type f -size -"$n"c
2446     done
2447 }
2448 
2449 # emit the first line as is, sorting all lines after that, using the
2450 # `sort` command, passing all/any arguments/options to it
2451 sortrest() {
2452     awk -v sort="sort $*" '
2453         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2454         { gsub(/\r$/, "") }
2455         NR == 1 { print; fflush() }
2456         NR > 1 { print | sort }
2457     '
2458 }
2459 
2460 # SORt Tab-Separated Values: emit the first line as is, sorting all lines after
2461 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2462 # all/any arguments/options to it
2463 sortsv() {
2464     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2465         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2466         { gsub(/\r$/, "") }
2467         NR == 1 { print; fflush() }
2468         NR > 1 { print | sort }
2469     '
2470 }
2471 
2472 # emit a line with the number of spaces given in it
2473 spaces() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" ""; }
2474 
2475 # ignore leading spaces, trailing spaces, even runs of multiple spaces
2476 # in the middle of lines, as well as trailing carriage returns
2477 squeeze() {
2478     awk '
2479         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2480         {
2481             gsub(/^ +| *\r?$/, "")
2482             gsub(/ *\t */, "\t")
2483             gsub(/  +/, " ")
2484             print; fflush()
2485         }
2486     ' "$@"
2487 }
2488 
2489 # SQUeeze and stOMP, by ignoring leading spaces, trailing spaces, even runs
2490 # of multiple spaces in the middle of lines, as well as trailing carriage
2491 # returns, while also turning runs of empty lines into single empty lines,
2492 # and ignoring leading/trailing empty lines, effectively also `squeezing`
2493 # lines vertically
2494 squomp() {
2495     awk '
2496         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2497         /^\r?$/ { empty = 1; next }
2498         empty { if (n > 0) print ""; empty = 0 }
2499         {
2500             gsub(/^ +| *\r?$/, "")
2501             gsub(/ *\t */, "\t")
2502             gsub(/  +/, " ")
2503             print; fflush()
2504             n++
2505         }
2506     ' "$@"
2507 }
2508 
2509 # Show a command, then Run it
2510 sr() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; }
2511 
2512 # turn runs of empty lines into single empty lines, effectively squeezing
2513 # paragraphs vertically, so to speak; runs of empty lines both at the start
2514 # and at the end are ignored
2515 stomp() {
2516     awk '
2517         /^\r?$/ { empty = 1; next }
2518         empty { if (n > 0) print ""; empty = 0 }
2519         { print; fflush(); n++ }
2520     ' "$@"
2521 }
2522 
2523 # STRike-thru (lines) with AWK
2524 strawk() {
2525     local cond="${1:-1}"
2526     [ $# -gt 0 ] && shift
2527     awk '
2528         { low = lower = tolower($0) }
2529         '"${cond}"' {
2530             gsub(/\x1b\[0m/, "\x1b[0m\x1b[9m")
2531             printf "\x1b[9m%s\x1b[0m\n", $0; fflush()
2532             next
2533         }
2534         { print; fflush() }
2535     ' "$@"
2536 }
2537 
2538 # Sort Tab-Separated Values: emit the first line as is, sorting all lines after
2539 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2540 # all/any arguments/options to it
2541 stsv() {
2542     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2543         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2544         { gsub(/\r$/, "") }
2545         NR == 1 { print; fflush() }
2546         NR > 1 { print | sort }
2547     '
2548 }
2549 
2550 # use the result of the `awk` function `substr` for each line
2551 substr() {
2552     local start="${1:-1}"
2553     local length="${2:-80}"
2554     [ $# -gt 0 ] && shift
2555     [ $# -gt 0 ] && shift
2556     awk -v start="${start}" -v len="${length}" \
2557         '{ printf "%s\n", substr($0, start, len); fflush() }' "$@"
2558 }
2559 
2560 # turn SUDo privileges OFF right away: arguments also cause `sudo` to run with
2561 # what's given, before relinquishing existing privileges
2562 # sudoff() {
2563 #     local code=0
2564 #     if [ $# -gt 0 ]; then
2565 #         sudo "$@"
2566 #         code=$?
2567 #     fi
2568 #     sudo -k
2569 #     return "${code}"
2570 # }
2571 
# append a final Tab-Separated-Values line with the sums of all columns from
# the input table(s) given; items from each input's first (header) line aren't
# added
2574 sumtsv() {
2575     awk -F "\t" '
2576         # FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2577 
2578         {
2579             gsub(/\r$/, "")
2580             print; fflush()
2581             if (width < NF) width = NF
2582         }
2583 
2584         FNR > 1 { for (i = 1; i <= NF; i++) sums[i] += $i + 0 }
2585 
2586         END {
2587             for (i = 1; i <= width; i++) {
2588                 if (i > 1) printf "\t"
2589                 printf "%s", sums[i] ""
2590             }
2591             if (width > 0) printf "\n"
2592         }
2593     ' "$@"
2594 }
2595 
2596 # show a random command defined in `clam`, using `wat` from `clam` itself
2597 # surprise() {
2598 #     local p="$(which clam)"
2599 #     wat "$(grep -E '^[a-z]+\(' "$p" | shuf -n 1 | sed -E 's-\(.*--')"
2600 # }
2601 
2602 # Time the command given
2603 t() { /usr/bin/time "$@"; }
2604 
2605 # show a reverse-sorted tally of all lines read, where ties are sorted
2606 # alphabetically
2607 tally() {
2608     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
2609         # reassure users by instantly showing the header
2610         BEGIN { print "value\ttally"; fflush() }
2611         { gsub(/\r$/, ""); t[$0]++ }
2612         END { for (k in t) { printf("%s\t%d\n", k, t[k]) | sort } }
2613     ' "$@"
2614 }
2615 
2616 # Tab AWK: TSV-specific I/O settings for `awk`
2617 # tawk() { awk -F "\t" -v OFS="\t" "$@"; }
2618 
2619 # Tab AWK: TSV-specific I/O settings for `awk`
2620 tawk() { stdbuf -oL awk -F "\t" -v OFS="\t" "$@"; }
2621 
2622 # quick alias for my script `tbp`
2623 tb() { tbp "$@"; }
2624 
2625 # Titled conCATenate Lines highlights each filename, before emitting its
2626 # lines
2627 tcatl() {
2628     awk '
2629         FNR == 1 { printf "\x1b[7m%s\x1b[0m\n", FILENAME; fflush() }
2630         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2631         { gsub(/\r$/, ""); print; fflush() }
2632     ' "$@"
2633 }
2634 
2635 # Title ECHO changes the tab-title on your terminal app
2636 techo() { printf "\e]0;%s\a\n" "$*"; }
2637 
2638 # simulate the cadence of old-fashioned teletype machines, by slowing down
2639 # the output of ASCII/UTF-8 symbols from the standard-input
2640 # teletype() {
2641 #     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | (
2642 #         IFS="$(printf "\n")"
2643 #         while read -r line; do
2644 #             echo "${line}" | sed -E 's-(.)-\1\n-g' |
2645 #                 while read -r item; do
2646 #                     sleep 0.015
2647 #                     printf "%s" "${item}"
2648 #                 done
2649 #             sleep 0.75
2650 #             printf "\n"
2651 #         done
2652 #     )
2653 # }
2654 
2655 # simulate the cadence of old-fashioned teletype machines, by slowing down
2656 # the output of ASCII/UTF-8 symbols from the standard-input
2657 teletype() {
2658     awk '
2659         {
2660             gsub(/\r$/, "")
2661 
2662             n = length($0)
2663             for (i = 1; i <= n; i++) {
2664                 if (code = system("sleep 0.015")) exit code
2665                 printf "%s", substr($0, i, 1); fflush()
2666             }
2667             if (code = system("sleep 0.75")) exit code
2668             printf "\n"; fflush()
2669         }
2670     ' "$@"
2671 }
2672 
2673 # run `top` without showing any of its output after quitting it
2674 tip() { tput smcup; top "$@"; tput rmcup; }
2675 
2676 # change the tab-title on your terminal app
2677 title() { printf "\e]0;%s\a\n" "$*"; }
2678 
2679 # quick alias for my script `tjp`
2680 tj() { tjp "$@"; }
2681 
2682 # quick alias for my script `tlp`
2683 tl() { tlp "$@"; }
2684 
# show the current date in a specific format
2686 today() { date +'%Y-%m-%d %a %b %d'; }
2687 
2688 # get the first n lines, or 1 by default
2689 toline() { head -n "${1:-1}" "${2:--}"; }
2690 
2691 # lowercase all ASCII symbols
2692 tolower() { awk '{ print tolower($0); fflush() }' "$@"; }
2693 
2694 # play a tone/sine-wave sound lasting the number of seconds given, or for 1
2695 # second by default: after the optional duration, the next optional arguments
2696 # are the volume and the tone-frequency; uses my script `waveout`
2697 tone() {
2698     waveout "${1:-1}" "${2:-1} * sin(${3:-440} * 2 * pi * t)" |
2699         mpv --really-quiet -
2700 }
2701 
2702 # get the processes currently using the most cpu
2703 topcpu() {
2704     local n="${1:-10}"
2705     [ "$n" -gt 0 ] && ps aux | awk '
2706         NR == 1 { print; fflush() }
2707         NR > 1 { print | "sort -rnk3" }
2708     ' | head -n "$(("$n" + 1))"
2709 }
2710 
2711 # show all files directly in the folder given, without looking any deeper
2712 topfiles() {
2713     local arg
2714     for arg in "${@:-.}"; do
2715         if [ ! -d "${arg}" ]; then
2716             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2717             return 1
2718         fi
2719         stdbuf -oL find "${arg}" -maxdepth 1 -type f
2720     done
2721 }
2722 
2723 # show all folders directly in the folder given, without looking any deeper
2724 topfolders() {
2725     local arg
2726     for arg in "${@:-.}"; do
2727         if [ ! -d "${arg}" ]; then
2728             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2729             return 1
2730         fi
2731         stdbuf -oL find "${arg}" -maxdepth 1 -type d |
2732             awk '!/^\.$/ { print; fflush() }'
2733     done
2734 }
2735 
2736 # get the processes currently using the most memory
2737 topmemory() {
2738     local n="${1:-10}"
2739     [ "$n" -gt 0 ] && ps aux | awk '
2740         NR == 1 { print; fflush() }
2741         NR > 1 { print | "sort -rnk6" }
2742     ' | head -n "$(("$n" + 1))"
2743 }
2744 
2745 # transpose (switch) rows and columns from tables
2746 transpose() {
2747     awk '
2748         { gsub(/\r$/, "") }
2749 
2750         FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 }
2751 
2752         {
2753             for (i = 1; i <= NF; i++) lines[i][NR] = $i
2754             if (maxitems < NF) maxitems = NF
2755         }
2756 
2757         END {
2758             for (j = 1; j <= maxitems; j++) {
2759                 for (i = 1; i <= NR; i++) {
2760                     if (i > 1) printf "\t"
2761                     printf "%s", lines[j][i]
2762                 }
2763                 printf "\n"
2764             }
2765         }
2766     ' "$@"
2767 }
2768 
2769 # ignore leading/trailing spaces, as well as trailing carriage returns
2770 trim() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2771 
2772 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2773 # decimal dots themselves, when decimals in a number are all zeros; works
2774 # on gawk and busybox awk, but not on mawk, as the latter lacks `gensub`
2775 # trimdecs() {
2776 #     awk '
2777 #         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2778 #         {
2779 #             gsub(/\r$/, "")
2780 #             $0 = gensub(/([0-9]+)\.0+/, "\\1", "g")
2781 #             $0 = gensub(/([0-9]+\.[0-9]*[1-9]+)0+/, "\\1", "g")
2782 #             print; fflush()
2783 #         }
2784 #     ' "$@"
2785 # }
2786 
2787 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2788 # decimal dots themselves, when decimals in a number are all zeros
2789 trimdecs() {
2790     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" |
2791         sed -u -E 's-([0-9]+)\.0+-\1-g; s-([0-9]+\.[0-9]*[1-9]+)0+-\1-g'
2792 }
2793 
2794 # ignore trailing spaces, as well as trailing carriage returns
2795 trimend() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2796 
2797 # ignore trailing spaces, as well as trailing carriage returns
2798 trimends() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2799 
2800 # ignore leading/trailing spaces, as well as trailing carriage returns
2801 trimlines() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2802 
2803 # ignore leading/trailing spaces, as well as trailing carriage returns
2804 trimsides() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2805 
2806 # ignore trailing spaces, as well as trailing carriage returns
2807 trimtrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2808 
2809 # ignore trailing spaces, as well as trailing carriage returns
2810 trimtrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2811 
2812 # try running a command, emitting an explicit message to standard-error
2813 # if the command given fails
2814 try() {
2815     "$@" || {
2816         printf "\n\e[31m%s \e[41m\e[97m failed \e[0m\n" "$*" >&2
2817         return 255
2818     }
2819 }
2820 
2821 # Transform Strings with Python; uses my script `tbp`
2822 tsp() { tbp -s "$@"; }
2823 
# run the command given, trying to turn its output into TSV (tab-separated
# values); uses `jc`, along with my script `dejson`
2826 tsvrun() { jc "$@" | dejson; }
2827 
2828 # Underline (lines) with AWK
2829 uawk() {
2830     local cond="${1:-1}"
2831     [ $# -gt 0 ] && shift
2832     awk '
2833         { low = lower = tolower($0) }
2834         '"${cond}"' {
2835             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2836             printf "\x1b[4m%s\x1b[0m\n", $0; fflush()
2837             next
2838         }
2839         { print; fflush() }
2840     ' "$@"
2841 }
2842 
2843 # Underline Every few lines: make groups of 5 lines (by default) stand out by
2844 # underlining the last line of each
2845 ue() {
2846     local n="${1:-5}"
2847     [ $# -gt 0 ] && shift
2848     awk -v n="$n" '
2849         BEGIN { if (n == 0) n = -1 }
2850         NR % n == 0 && NR != 1 {
2851             gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m")
2852             printf("\x1b[4m%s\x1b[0m\n", $0); fflush()
2853             next
2854         }
2855         { print; fflush() }
2856     ' "$@"
2857 }
2858 
2859 # deduplicate lines, keeping them in their original order
2860 unique() { awk '!c[$0]++ { print; fflush() }' "$@"; }
2861 
# concatenate all named input sources unix-style: all trailing CRLFs become
# single LFs, and each non-empty input always ends in a LF, so lines from
# different sources are never accidentally joined; also leading UTF-8 BOMs on
# the first line of each input are ignored, as those are useless at best
2866 unixify() {
2867     awk '
2868         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2869         { gsub(/\r$/, ""); print; fflush() }
2870     ' "$@"
2871 }
2872 
2873 # go UP n folders, or go up 1 folder by default
2874 up() {
2875     if [ "${1:-1}" -le 0 ]; then
2876         cd .
2877         return $?
2878     fi
2879 
2880     cd "$(printf "%${1:-1}s" "" | sed 's- -../-g')" || return $?
2881 }
2882 
2883 # convert United States Dollars into CAnadian Dollars, using the latest
2884 # official exchange rates from the bank of canada; during weekends, the
2885 # latest rate may be from a few days ago; the default amount of usd to
2886 # convert is 1, when not given
2887 usd2cad() {
2888     local site='https://www.bankofcanada.ca/valet/observations/group'
2889     local csv_rates="${site}/FX_RATES_DAILY/csv"
2890     local url
2891     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
2892     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
2893         /USD/ { for (i = 1; i <= NF; i++) if($i ~ /USD/) j = i }
2894         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
2895 }
2896 
2897 # View with `less`
2898 v() { less -JMKiCRS "$@"; }
2899 
2900 # run a command, showing its success/failure right after
2901 verdict() {
2902     local code
2903     "$@"
2904     code=$?
2905 
2906     if [ "${code}" -eq 0 ]; then
2907         printf "\n\e[38;2;0;135;95m%s \e[48;2;0;135;95m\e[38;2;255;255;255m succeeded \e[0m\n" "$*" >&2
2908     else
2909         printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" >&2
2910     fi
2911     return "${code}"
2912 }
2913 
2914 # run `cppcheck` with even stricter options
2915 vetc() { cppcheck --enable=portability --enable=style "$@"; }
2916 
2917 # run `cppcheck` with even stricter options
2918 vetcpp() { cppcheck --enable=portability --enable=style "$@"; }
2919 
2920 # check shell scripts for common gotchas, avoiding complaints about using
2921 # the `local` keyword, which is widely supported in practice
2922 vetshell() { shellcheck -e 3043 "$@"; }
2923 
2924 # View with Header runs `less` without line numbers, with ANSI styles, no
2925 # line-wraps, and using the first n lines as a sticky-header (1 by default),
2926 # so they always show on top
2927 vh() {
2928     local n="${1:-1}"
2929     [ $# -gt 0 ] && shift
2930     less --header="$n" -JMKiCRS "$@"
2931 }
2932 
2933 # VIEW the result of showing a command, then RUNning it, using `less`
2934 viewrun() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2935 
2936 # View Nice Columns; uses my scripts `realign` and `nn`
2937 vnc() { realign "$@" | nn --gray | less -JMKiCRS; }
2938 
2939 # View Nice Hexadecimals; uses my script `nh`
2940 vnh() { nh "$@" | less -JMKiCRS; }
2941 
2942 # View Nice Json / Very Nice Json; uses my scripts `nj` and `nn`
2943 vnj() { nj "$@" | less -JMKiCRS; }
2944 
2945 # View Very Nice Json with Nice Numbers; uses my scripts `nj` and `nn`
2946 vnjnn() { nj "$@" | nn --gray | less -JMKiCRS; }
2947 
2948 # View Nice Numbers; uses my script `nn`
2949 vnn() { nn "${@:---gray}" | less -JMKiCRS; }
2950 
2951 # View Nice Table / Very Nice Table; uses my scripts `nt` and `nn`
2952 vnt() {
2953     awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" |
2954         nt | nn --gray |
2955         awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' |
2956         less -JMKiCRS #--header=1
2957 }
2958 
2959 # View-Run using `less`: show a command, then run it
2960 # vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less --header=1 -JMKiCRS; }
2961 
2962 # View-Run using `less`: show a command, then run it
2963 vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; }
2964 
2965 # View Text with `less`
2966 # vt() { less -JMKiCRS "$@"; }
2967 
2968 # View Text with the `micro` text-editor in read-only mode
2969 vt() { micro -readonly true "$@"; }
2970 
2971 # What are these (?); uses my command `nwat`
2972 # w() { nwat "$@"; }
2973 
2974 # What Are These (?) shows what the names given to it are/do
2975 wat() {
2976     local a
2977     local gap=0
2978 
2979     if [ $# -eq 0 ]; then
2980         printf "\e[31mwat: no names given\e[0m\n" > /dev/stderr
2981         return 1
2982     fi
2983 
2984     for a in "$@"; do
2985         [ "${gap}" -gt 0 ] && printf "\n"
2986         gap=1
2987         # printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
2988         printf "\e[7m%-80s\e[0m\n" "$a"
2989 
2990         # resolve 1 alias level
2991         if alias "$a" 2> /dev/null > /dev/null; then
2992             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
2993         fi
2994 
2995         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
2996             # resolved aliases with args/spaces in them would otherwise fail
2997             echo "$a"
2998         elif whence -f "$a" > /dev/null 2> /dev/null; then
2999             # zsh seems to show a shell function's code only via `whence -f`
3000             whence -f "$a"
3001         elif type "$a" > /dev/null 2> /dev/null; then
3002             # dash doesn't support `declare`, and `type` in bash emits
3003             # a redundant first output line, when it's a shell function
3004             type "$a" | awk '
3005                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
3006                 { print; fflush() }
3007                 END { if (NR < 2 && skipped) print skipped }
3008             '
3009         else
3010             printf "\e[31m%s not found\e[0m\n" "$a"
3011         fi
3012     done | less -JMKiCRS
3013 }
3014 
# Word-Count TSV runs the `wc` app using all its stats, emitting tab-separated
# lines instead of its usual space-aligned columns
3017 wctsv() {
3018     printf "file\tbytes\tlines\tcharacters\twords\tlongest\n"
3019     stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \
3020         's-^ *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^\r]*)$-\6\t\4\t\1\t\3\t\2\t\5-' |
3021         awk '
3022             NR > 1 { print prev; fflush() }
3023             { prev = $0 }
3024             END { if (NR == 1 || !/^total\t/) print }
3025         '
3026 }
3027 
3028 # get weather forecasts, almost filling the terminal's current width
3029 # weather() {
3030 #     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
3031 #     curl --show-error -s telnet://graph.no:79 |
3032 #     sed -E \
3033 #         -e 's/ *\r?$//' \
3034 #         -e '/^\[/d' \
3035 #         -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
3036 #         -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
3037 #         -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
3038 #         -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
3039 #         -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
3040 #         -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
3041 #         -e 's/\*/○/g' |
3042 #     awk 1 |
3043 #     less -JMKiCRS
3044 # }
3045 
3046 # get weather forecasts; uses my script `nwf`
3047 weather() { nwf "$@"; }
3048 
3049 # Weather Forecast
3050 wf() {
3051     printf "%s\r\n\r\n" "$*" | curl --show-error -s telnet://graph.no:79 |
3052         awk '{ print; fflush() }' | less -JMKiCRS
3053 }
3054 
3055 # WGet to standard output
3056 wg() { wget -O - "$@"; }
3057 
3058 # WGET to standard Output
3059 wgeto() { wget -O - "$@"; }
3060 
3061 # recursively find all files with trailing spaces/CRs
3062 wheretrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
3063 
3064 # recursively find all files with trailing spaces/CRs
3065 whichtrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
3066 
3067 # turn all full linux/unix-style paths (which start from the filesystem root)
3068 # detected into WINdows-style PATHS
3069 winpaths() {
3070     awk '{ print; fflush() }' "$@" |
3071         sed -u -E 's-(/mnt/([A-Za-z])(/))-\u\2:/-g'
3072 }
3073 
3074 # run `xargs`, using whole lines as extra arguments
3075 # x() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
3076 
3077 # run `xargs`, using whole lines as extra arguments
3078 # x() {
3079 #     awk -v ORS='\000' '
3080 #         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
3081 #         { gsub(/\r$/, ""); print; fflush() }
3082 #     ' | xargs -0 "$@"
3083 # }
3084 
3085 # run `xargs`, using zero/null bytes as the extra-arguments terminator
3086 x0() { xargs -0 "$@"; }
3087 
3088 # run `xargs`, using whole lines as extra arguments
3089 # xl() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; }
3090 
3091 # run `xargs`, using whole lines as extra arguments
3092 xl() {
3093     awk -v ORS='\000' '
3094         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
3095         { gsub(/\r$/, ""); print; fflush() }
3096     ' | xargs -0 "$@"
3097 }
3098 
3099 # Youtube Audio Player
3100 yap() {
3101     local url
3102     # some youtube URIs end with extra playlist/tracker parameters
3103     url="$(echo "$1" | sed 's-&.*--')"
3104     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
3105 }
3106 
3107 # show a calendar for the current YEAR, or for the year given
3108 year() {
3109     {
3110         # show the current date/time center-aligned
3111         printf "%20s\e[38;2;78;154;6m%s\e[0m  \e[38;2;52;101;164m%s\e[0m\n\n" \
3112             "" "$(date +'%a %b %d %Y')" "$(date +%T)"
3113         # debian linux has a different `cal` app which highlights the day
3114         if [ -e "/usr/bin/ncal" ]; then
3115             # fix debian/ncal's weird way to highlight the current day
3116             ncal -C -y "$@" | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g'
3117         else
3118             cal -y "$@"
3119         fi
3120     } | less -JMKiCRS
3121 }
3122 
3123 # show the current date in the YYYY-MM-DD format
3124 ymd() { date +'%Y-%m-%d'; }
3125 
3126 # YouTube Url
3127 ytu() {
3128     local url
3129     # some youtube URIs end with extra playlist/tracker parameters
3130     url="$(echo "$1" | sed 's-&.*--')"
3131     [ $# -gt 0 ] && shift
3132     yt-dlp "$@" --get-url "${url}"
3133 }
3134 
3135 # . <(
3136 #     find "$(dirname $(which clam))" -type f -print0 |
3137 #         xargs -0 -n 1 basename |
3138 #         awk '{ print "unset " $0; print "unalias " $0 }'
3139 # ) 2> /dev/null