File: clam.sh
   1 #!/bin/sh
   2 
   3 # The MIT License (MIT)
   4 #
   5 # Copyright © 2020-2025 pacman64
   6 #
   7 # Permission is hereby granted, free of charge, to any person obtaining a copy
   8 # of this software and associated documentation files (the “Software”), to deal
   9 # in the Software without restriction, including without limitation the rights
  10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11 # copies of the Software, and to permit persons to whom the Software is
  12 # furnished to do so, subject to the following conditions:
  13 #
  14 # The above copyright notice and this permission notice shall be included in
  15 # all copies or substantial portions of the Software.
  16 #
  17 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23 # SOFTWARE.
  24 
  25 
  26 # clam
  27 #
  28 # Command-Line Augmentation Module (clam): get the best out of your shell
  29 #
  30 #
  31 # This is a collection of arguably useful shell functions and shortcuts:
  32 # some of these extra commands can be real time/effort savers, ideally
  33 # letting you concentrate on getting things done.
  34 #
  35 # Some of these commands depend on my other scripts from the `pac-tools`,
  36 # others either rely on widely-preinstalled command-line apps, or ones
  37 # which are available on most of the major command-line `package` managers.
  38 #
  39 # Among these commands, you'll notice a preference for lines whose items
  40 # are tab-separated instead of space-separated, and unix-style lines, which
  41 # always end with a line-feed, instead of a CRLF byte-pair. This convention
  42 # makes plain-text data-streams less ambiguous and generally easier to work
  43 # with, especially when passing them along pipes.
  44 #
  45 # To use this script, you're supposed to `source` it, so its definitions
  46 # stay for your whole shell session: for that, you can run `source clam` or
  47 # `. clam` (no quotes either way), either directly or at shell startup.
  48 #
  49 # This script is compatible with `bash`, `zsh`, and even `dash`, which is
  50 # debian linux's default non-interactive shell. Some of its commands even
  51 # seem to work on busybox's shell.
  52 
  53 
  54 # handle help options
  55 case "$1" in
  56     -h|--h|-help|--help)
  57         # show help message, using the info-comment from this very script
  58         awk '/^# +clam/, /^$/ { gsub(/^# ?/, ""); print }' "$0"
  59         exit 0
  60     ;;
  61 esac
  62 
  63 
  64 # dash doesn't support regex-matching syntax, which forces case statements here
  65 case "$0" in
  66     -bash|-dash|-sh|bash|dash|sh)
  67         # script is being sourced with bash or dash, which is good
  68         :
  69     ;;
  70     *)
  71         case "$ZSH_EVAL_CONTEXT" in
  72             *:file)
  73                 # script is being sourced with zsh, which is good
  74                 :
  75             ;;
  76             *)
  77                 # script is being run normally, which is a waste of time
  78 printf "\e[48;2;255;255;135m\e[30mDon't run this script, source it instead: to do that,\e[0m\n"
  79 printf "\e[48;2;255;255;135m\e[30mrun 'source clam' or '. clam' (no quotes either way).\e[0m\n"
  80                 # failing during shell-startup may deny shell access, so exit
  81                 # with a 0 error-code to declare success
  82                 exit 0
  83             ;;
  84         esac
  85     ;;
  86 esac
  87 
  88 
  89 # n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
  90 c1() { bsbs 1 "$@"; }
  91 c2() { bsbs 2 "$@"; }
  92 c3() { bsbs 3 "$@"; }
  93 c4() { bsbs 4 "$@"; }
  94 c5() { bsbs 5 "$@"; }
  95 c6() { bsbs 6 "$@"; }
  96 c7() { bsbs 7 "$@"; }
  97 c8() { bsbs 8 "$@"; }
  98 c9() { bsbs 9 "$@"; }
  99 
 100 # n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
 101 alias 1=c1
 102 alias 2=c2
 103 alias 3=c3
 104 alias 4=c4
 105 alias 5=c5
 106 alias 6=c6
 107 alias 7=c7
 108 alias 8=c8
 109 alias 9=c9
 110 
 111 # n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
 112 alias 1c=c1
 113 alias 2c=c2
 114 alias 3c=c3
 115 alias 4c=c4
 116 alias 5c=c5
 117 alias 6c=c6
 118 alias 7c=c7
 119 alias 8c=c8
 120 alias 9c=c9
 121 
 122 # Avoid/ignore lines which match any of the regexes given
 123 a() {
 124     awk '
 125         BEGIN {
 126             for (i = 1; i < ARGC; i++) {
 127                 e[i] = ARGV[i]
 128                 delete ARGV[i]
 129             }
 130         }
 131 
 132         {
 133             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
 134             print; fflush()
 135             got++
 136         }
 137 
 138         END { exit(got == 0) }
 139     ' "${@:-^\r?$}"
 140 }
 141 
 142 # find name from the local `apt` database of installable packages
 143 # aptfind() {
 144 #     # despite warnings, the `apt search` command has been around for years
 145 #     # apt search "$1" 2>/dev/null | rg -A 1 "^$1" | sed -u 's/^--$//'
 146 #     apt search "$1" 2>/dev/null | rg -A 1 "^[a-z0-9-]*$1" | sed -u 's/^--$//'
 147 # }
 148 
 149 # emit each argument given as its own line of output
 150 args() { awk 'BEGIN { for (i = 1; i < ARGC; i++) print ARGV[i]; exit }' "$@"; }
 151 
 152 # turn UTF-8 into visible pseudo-ASCII, where variants of latin letters become
 153 # their basic ASCII counterparts, and where non-ASCII symbols become question
 154 # marks, one question mark for each code-point byte
 155 asciify() { iconv -f utf-8 -t ascii//translit "$@"; }
 156 
 157 # avoid/ignore lines which match any of the regexes given
 158 avoid() {
 159     awk '
 160         BEGIN {
 161             for (i = 1; i < ARGC; i++) {
 162                 e[i] = ARGV[i]
 163                 delete ARGV[i]
 164             }
 165         }
 166 
 167         {
 168             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
 169             print; fflush()
 170             got++
 171         }
 172 
 173         END { exit(got == 0) }
 174     ' "${@:-^\r?$}"
 175 }
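
     # usage sketch (made-up input): lines matching any regex are skipped, and
     # with no arguments empty/CR-only lines are skipped, e.g.
     #   printf 'alpha\nbeta\ngamma\n' | avoid '^b' 'mma$'   # emits only "alpha"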
 176 
 177 # emit a line with a repeating ball-like symbol in it
 178 balls() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -●-g'; }
 179 
 180 # show an ansi-styled BANNER-like line
 181 banner() { printf "\e[7m%s\e[0m\n" "$*"; }
 182 
 183 # emit a colored bar which can help visually separate different outputs
 184 bar() {
 185     [ "${1:-80}" -gt 0 ] &&
 186         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" ""
 187 }
 188 
 189 # process Blocks/paragraphs of non-empty lines with AWK
 190 # bawk() { awk -F='' -v RS='' "$@"; }
 191 
 192 # process Blocks/paragraphs of non-empty lines with AWK
 193 bawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 194 
 195 # play a repeating and annoying high-pitched beep sound a few times a second,
 196 # lasting the number of seconds given, or for 1 second by default; uses my
 197 # script `waveout`
 198 beeps() {
 199     local f='sin(2_000 * tau * t) * (t % 0.5 < 0.0625)'
 200     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 201 }
 202 
 203 # start by joining all arguments given as a tab-separated-items line of output,
 204 # followed by all lines from stdin verbatim
 205 begintsv() {
 206     awk '
 207         BEGIN {
 208             for (i = 1; i < ARGC; i++) {
 209                 if (i > 1) printf "\t"
 210                 printf "%s", ARGV[i]
 211                 delete ARGV[i]
 212             }
 213             if (ARGC > 1) printf "\n"
 214             fflush()
 215         }
 216         { print; fflush() }
 217     ' "$@"
 218 }
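
     # usage sketch (made-up header names): emit a TSV header line, then pass
     # stdin along unchanged, e.g.
     #   printf '1\t2\n' | begintsv name value   # emits "name<TAB>value", then "1<TAB>2"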
 219 
 220 # play a repeating synthetic-bell-like sound lasting the number of seconds
 221 # given, or for 1 second by default; uses my script `waveout`
 222 bell() {
 223     local f='sin(880*tau*u) * exp(-10*u)'
 224     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 225 }
 226 
 227 # play a repeating sound with synthetic-bells, lasting the number of seconds
 228 # given, or for 1 second by default; uses my script `waveout`
 229 bells() {
 230     local f="sum(sin(880*tau*v)*exp(-10*v) for v in (u, (u-0.25)%1)) / 2"
 231     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 232 }
 233 
 234 # Breathe Header: add an empty line after the first one (the header), then
 235 # separate groups of 5 lines (by default) with empty lines between them
 236 bh() {
 237     local n="${1:-5}"
 238     [ $# -gt 0 ] && shift
 239     awk -v n="$n" '
 240         BEGIN { if (n == 0) n = -1 }
 241         (NR - 1) % n == 1 && NR > 1 { print "" }
 242         { print; fflush() }
 243     ' "$@"
 244 }
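
     # usage sketch (made-up input): `seq 7 | bh 3` emits line 1 (the header),
     # an empty line, lines 2-4, another empty line, then lines 5-7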
 245 
 246 # recursively find all files with at least the number of bytes given; when
 247 # not given a minimum byte-count, the default is 100 binary megabytes
 248 bigfiles() {
 249     local n
 250     n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 251     [ $# -gt 0 ] && shift
 252 
 253     local arg
 254     for arg in "${@:-.}"; do
 255         if [ ! -d "${arg}" ]; then
 256             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 257             return 1
 258         fi
 259         stdbuf -oL find "${arg}" -type f \( -size "$n"c -o -size +"$n"c \)
 260     done
 261 }
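
     # usage sketch (hypothetical folder): underscores in the byte-count are
     # ignored, so both lines below ask for files of 10_000_000 bytes or more
     #   bigfiles 10_000_000 ~/Downloads
     #   bigfiles 10000000 ~/Downloads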
 262 
 263 # Breathe Lines: separate groups of 5 lines (by default) with empty lines
 264 bl() {
 265     local n="${1:-5}"
 266     [ $# -gt 0 ] && shift
 267     awk -v n="$n" '
 268         BEGIN { if (n == 0) n = -1 }
 269         NR % n == 1 && NR != 1 { print "" }
 270         { print; fflush() }
 271     ' "$@"
 272 }
 273 
 274 # process BLocks/paragraphs of non-empty lines with AWK
 275 # blawk() { awk -F='' -v RS='' "$@"; }
 276 
 277 # process BLocks/paragraphs of non-empty lines with AWK
 278 blawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }
 279 
 280 # emit a line with a repeating block-like symbol in it
 281 blocks() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -█-g'; }
 282 
 283 # Book-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 284 # my script `bsbs`
 285 bman() {
 286     local w
 287     w="$(tput cols)"
 288     if [ "$w" -gt 120 ]; then
 289         w="$((w / 2 - 1))"
 290     fi
 291     MANWIDTH="$w" man "$@" | bsbs 2
 292 }
 293 
 294 # split lines using the regex given, turning them into single-item lines
 295 breakdown() {
 296     local sep="${1:- }"
 297     [ $# -gt 0 ] && shift
 298     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 299 }
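
     # usage sketch (made-up input): split comma-separated items into lines
     #   echo 'a,b,c' | breakdown ,   # emits "a", "b", and "c", each on its own line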
 300 
 301 # BOOK-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
 302 # my script `bsbs`
 303 bookman() {
 304     local w
 305     w="$(tput cols)"
 306     if [ "$w" -gt 120 ]; then
 307         w="$((w / 2 - 1))"
 308     fi
 309     MANWIDTH="$w" man "$@" | bsbs 2
 310 }
 311 
 312 # separate groups of 5 lines (by default) with empty lines
 313 breathe() {
 314     local n="${1:-5}"
 315     [ $# -gt 0 ] && shift
 316     awk -v n="$n" '
 317         BEGIN { if (n == 0) n = -1 }
 318         NR % n == 1 && NR != 1 { print "" }
 319         { print; fflush() }
 320     ' "$@"
 321 }
 322 
 323 # Browse Text
 324 bt() { less -JMKNiCRS "$@"; }
 325 
 326 # show a reverse-sorted tally of all lines read, where ties are sorted
 327 # alphabetically, and where trailing bullets are added to quickly make
 328 # the tally counts comparable at a glance
 329 bully() {
 330     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
 331         # reassure users by instantly showing the header
 332         BEGIN { print "value\ttally\tbullets"; fflush() }
 333 
 334         { gsub(/\r$/, ""); tally[$0]++ }
 335 
 336         END {
 337             # find the max tally, which is needed to build the bullets-string
 338             max = 0
 339             for (k in tally) {
 340                 if (max < tally[k]) max = tally[k]
 341             }
 342 
 343             # make enough bullets for all tallies: this loop makes growing the
 344             # string a task with complexity O(n * log n), instead of a naive
 345             # O(n**2), which can slow things down when tallies are high enough
 346             bullets = "•"
 347             for (n = max; n > 1; n /= 2) {
 348                 bullets = bullets bullets
 349             }
 350 
 351             # emit unsorted output lines to the sort cmd, which will emit the
 352             # final reverse-sorted tally lines
 353             for (k in tally) {
 354                 s = substr(bullets, 1, tally[k])
 355                 printf("%s\t%d\t%s\n", k, tally[k], s) | sort
 356             }
 357         }
 358     ' "$@"
 359 }
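
     # usage sketch (made-up input):
     #   printf 'a\nb\na\n' | bully
     # emits the header, then "a<TAB>2<TAB>••" and "b<TAB>1<TAB>•", with the
     # most-repeated lines first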
 360 
 361 # play a busy-phone-line sound lasting the number of seconds given, or for 1
 362 # second by default; uses my script `waveout`
 363 busy() {
 364     # local f='(u < 0.5) * (sin(480*tau * t) + sin(620*tau * t)) / 2'
 365     local f='min(1, exp(-90*(u-0.5))) * (sin(480*tau*t) + sin(620*tau*t)) / 2'
 366     # local f='(sin(350*tau*t) + sin(450*tau*t)) / 2 * min(1, exp(-90*(u-0.5)))'
 367     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
 368 }
 369 
 370 # keep all BUT the FIRST (skip) n lines, or skip just the 1st line by default
 371 butfirst() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
 372 
 373 # keep all BUT the LAST n lines, or skip just the last line by default
 374 butlast() { head -n -"${1:-1}" "${2:--}"; }
 375 
 376 # load bytes from the filenames given
 377 bytes() { cat "$@"; }
 378 
 379 # quick alias for `cat`
 380 c() { cat "$@"; }
 381 
 382 # CAlculator with Nice numbers runs my script `ca` and colors results with
 383 # my script `nn`, alternating styles to make long numbers easier to read
 384 can() { ca "$@" | nn --gray; }
 385 
 386 # conCATenate Lines ignores leading byte-order marks on first lines, trailing
 387 # carriage-returns, and guarantees no lines are ever accidentally joined
 388 # across inputs, always emitting a line-feed at the end of every line
 389 catl() {
 390     awk '
 391         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 392         { gsub(/\r$/, ""); print; fflush() }
 393     ' "$@"
 394 }
 395 
 396 # Csv AWK: CSV-specific input settings for `awk`
 397 # cawk() { awk --csv "$@"; }
 398 
 399 # Csv AWK: CSV-specific input settings for `awk`
 400 cawk() { stdbuf -oL awk --csv "$@"; }
 401 
 402 # Compile C Stripped
 403 ccs() { cc -Wall -O2 -s -fanalyzer "$@"; }
 404 
 405 # Colored Go Test on the folder given; uses my command `gbm`
 406 cgt() { go test "${1:-.}" 2>&1 | gbm '^ok' '^[-]* ?FAIL' '^\?'; }
 407 
 408 # ignore final line-feed from text, if it's the very last byte; also ignore
 409 # all trailing carriage-returns
 410 choplf() {
 411     awk '
 412         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 413         NR > 1 { print ""; fflush() }
 414         { gsub(/\r$/, ""); printf "%s", $0; fflush() }
 415     ' "$@"
 416 }
 417 
 418 # Color Json using the `jq` app, allowing an optional filepath as the data
 419 # source, and even an optional transformation formula
 420 cj() { jq -C "${2:-.}" "${1:--}"; }
 421 
 422 # clean the screen, after running the command given
 423 clean() { tput smcup; "$@"; tput rmcup; }
 424 
 425 # show a live digital clock
 426 clock() { watch -n 1 echo 'Press Ctrl + C to quit this clock'; }
 427 
 428 # Colored Live/Line-buffered RipGrep ensures results show up immediately,
 429 # also emitting colors when piped
 430 clrg() { rg --color=always --line-buffered "$@"; }
 431 
 432 # CLear Screen, like the old dos command of the same name
 433 cls() { clear; }
 434 
 435 # COunt COndition: count how many times the AWK expression given is true
 436 coco() {
 437     local cond="${1:-1}"
 438     [ $# -gt 0 ] && shift
 439     awk "
 440         { low = lower = tolower(\$0) }
 441         ${cond} { count++ }
 442         END { print count }
 443     " "$@"
 444 }
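
     # usage sketch (made-up input): `low`/`lower` hold each lowercased line,
     # so the condition can match case-insensitively, e.g.
     #   printf 'Cat\ndog\ncow\n' | coco 'low ~ /^c/'   # emits 2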
 445 
 446 # Colored RipGrep ensures app `rg` emits colors when piped
 447 crg() { rg --color=always --line-buffered "$@"; }
 448 
 449 # emit a line with a repeating cross-like symbol in it
 450 crosses() {
 451     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -×-g'
 452 }
 453 
 454 # split lines using the regex given, turning them into single-item lines
 455 crumble() {
 456     local sep="${1:- }"
 457     [ $# -gt 0 ] && shift
 458     awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
 459 }
 460 
 461 # turn Comma-Separated-Values tables into Tab-Separated-Values tables
 462 csv2tsv() { xsv fmt -t '\t' "$@"; }
 463 
 464 # Change Units turns common US units into international ones; uses my
 465 # scripts `bu` (Better Units) and `nn` (Nice Numbers)
 466 cu() {
 467     bu "$@" | awk '
 468         NF == 5 || (NF == 4 && $NF == "s") { print $(NF-1), $NF }
 469         NF == 4 && $NF != "s" { print $NF }
 470     ' | nn --gray
 471 }
 472 
 473 # CURL Silent spares you the progress bar, but still tells you about errors
 474 curls() { curl --show-error -s "$@"; }
 475 
 476 # Count With AWK: count the times the AWK expression/condition given is true
 477 cwawk() {
 478     local cond="${1:-1}"
 479     [ $# -gt 0 ] && shift
 480     awk "
 481         { low = lower = tolower(\$0) }
 482         ${cond} { count++ }
 483         END { print count }
 484     " "$@"
 485 }
 486 
 487 # emit a line with a repeating dash-like symbol in it
 488 dashes() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -—-g'; }
 489 
 490 # DEcode BASE64-encoded data, or even base64-encoded data-URIs, by ignoring
 491 # the leading data-URI declaration, if present
 492 debase64() { sed -E 's-^data:.{0,50};base64,--' "${1:--}" | base64 -d; }
 493 
 494 # DECAPitate (lines) emits the first line as is, piping all lines after that
 495 # to the command given, passing all/any arguments/options to it
 496 # decap() {
 497 #     awk -v cmd="$*" 'NR == 1 { print; fflush() } NR > 1 { print | cmd }'
 498 # }
 499 
 500 # turn Comma-Separated-Values tables into tab-separated-values tables
 501 # decsv() { xsv fmt -t '\t' "$@"; }
 502 
 503 # DEDUPlicate prevents lines from appearing more than once
 504 dedup() { awk '!c[$0]++ { print; fflush() }' "$@"; }
 505 
 506 # dictionary-define the word given, using an online service
 507 define() {
 508     local arg
 509     local gap=0
 510     for arg in "$@"; do
 511         [ "${gap}" -gt 0 ] && printf "\n"
 512         gap=1
 513         printf "\x1b[7m%-80s\x1b[0m\n" "${arg}"
 514         curl -s "dict://dict.org/d:${arg}" | awk '
 515             { gsub(/\r$/, "") }
 516             /^151 / {
 517                 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush()
 518                 next
 519             }
 520             /^[1-9][0-9]{2} / {
 521                 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush()
 522                 next
 523             }
 524             { print; fflush() }
 525         '
 526     done | less -JMKiCRS
 527 }
 528 
 529 # DEcompress GZip-encoded data
 530 # degz() { zcat "$@"; }
 531 
 532 # turn JSON Lines into a proper json array
 533 dejsonl() { jq -s -M "${@:-.}"; }
 534 
 535 # delay lines from the standard-input, waiting the number of seconds given
 536 # for each line, or waiting 1 second by default
 537 # delay() {
 538 #     local seconds="${1:-1}"
 539 #     (
 540 #         IFS="$(printf "\n")"
 541 #         while read -r line; do
 542 #             sleep "${seconds}"
 543 #             printf "%s\n" "${line}"
 544 #         done
 545 #     )
 546 # }
 547 
 548 # expand each tab into up to the number of spaces given, or 4 by default
 549 detab() { expand -t "${1:-4}"; }
 550 
 551 # ignore trailing spaces, as well as trailing carriage returns
 552 detrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
 553 
 554 # turn UTF-16 data into UTF-8
 555 deutf16() { iconv -f utf16 -t utf8 "$@"; }
 556 
 557 # DIVide 2 numbers 3 ways, including the complement
 558 div() {
 559     awk -v a="${1:-1}" -v b="${2:-1}" '
 560         BEGIN {
 561             gsub(/_/, "", a)
 562             gsub(/_/, "", b)
 563             if (a > b) { c = a; a = b; b = c }
 564             c = 1 - a / b
 565             if (0 <= c && c <= 1) printf "%f\n%f\n%f\n", a / b, b / a, c
 566             else printf "%f\n%f\n", a / b, b / a
 567             exit
 568         }'
 569 }
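
     # usage sketch:
     #   div 3 4   # emits 0.750000, 1.333333, and 0.250000 (the complement)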
 570 
 571 # get/fetch data from the filename or URI given; named `dog` because dogs can
 572 # `fetch` things for you
 573 # dog() {
 574 #     if [ $# -gt 1 ]; then
 575 #         printf "\e[31mdogs only have 1 mouth to fetch with\e[0m\n" >&2
 576 #         return 1
 577 #     fi
 578 #
 579 #     if [ -e "$1" ]; then
 580 #         cat "$1"
 581 #         return $?
 582 #     fi
 583 #
 584 #     case "${1:--}" in
 585 #         -) cat -;;
 586 #         file://*|https://*|http://*) curl --show-error -s "$1";;
 587 #         ftp://*|ftps://*|sftp://*) curl --show-error -s "$1";;
 588 #         dict://*|telnet://*) curl --show-error -s "$1";;
 589 #         data:*) echo "$1" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 590 #         *) curl --show-error -s "https://$1";;
 591 #     esac 2> /dev/null || {
 592 #         printf "\e[31mcan't fetch %s\e[0m\n" "${1:--}" >&2
 593 #         return 1
 594 #     }
 595 # }
 596 
 597 # emit a line with a repeating dot-like symbol in it
 598 dots() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -·-g'; }
 599 
 600 # ignore/remove all matched regexes given on all stdin lines
 601 drop() {
 602     awk '
 603         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 604         {
 605             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 606             print; fflush()
 607         }
 608     ' "${@:-\r$}"
 609 }
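
     # usage sketch (made-up input): matches are removed, not whole lines
     #   echo 'foo123bar' | drop '[0-9]+'   # emits "foobar"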
 610 
 611 # show the current Date and Time
 612 dt() {
 613     printf "\e[32m%s\e[0m  \e[34m%s\e[0m\n" "$(date +'%a %b %d')" "$(date +%T)"
 614 }
 615 
 616 # show the current Date, Time, and a Calendar with the 3 `current` months
 617 dtc() {
 618     # show the current date/time center-aligned
 619     printf "%22s\e[32m%s\e[0m  \e[34m%s\e[0m\n\n" \
 620         "" "$(date +'%a %b %d')" "$(date +%T)"
 621     # debian linux has a different `cal` app which highlights the day
 622     if [ -e "/usr/bin/ncal" ]; then
 623         ncal -C -3
 624     else
 625         cal -3
 626     fi
 627 }
 628 
 629 # quick alias for `echo`
 630 e() { echo "$@"; }
 631 
 632 # Evaluate Awk expression
 633 ea() {
 634     local expr="${1:-0}"
 635     [ $# -gt 0 ] && shift
 636     awk "BEGIN { print ${expr}; exit }" "$@"
 637 }
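
     # usage sketch:
     #   ea '1024 * 1024'   # emits 1048576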
 638 
 639 # Extended-mode Grep, enabling its full regex syntax
 640 eg() { grep -E --line-buffered "$@"; }
 641 
 642 # Extended Grep, Recursive Interactive and Plain
 643 # egrip() { ugrep -r -Q --color=never -E "$@"; }
 644 
 645 # show all empty files in a folder, digging recursively
 646 emptyfiles() {
 647     local arg
 648     for arg in "${@:-.}"; do
 649         if [ ! -d "${arg}" ]; then
 650             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 651             return 1
 652         fi
 653         stdbuf -oL find "${arg}" -type f -size 0c
 654     done
 655 }
 656 
 657 # Evaluate Nodejs expression
 658 # en() {
 659 #     local expr="${1:-null}"
 660 #     expr="$(echo "${expr}" | sed 's-\\-\\\\-g; s-`-\`-g')"
 661 #     node -e "console.log(${expr})" | sed -u 's-\x1b\[[^A-Za-z]+[A-Za-z]--g'
 662 # }
 663 
 664 # Evaluate Python expression
 665 ep() { python -c "print(${1:-None})"; }
 666 
 667 # Extended Plain Interactive Grep
 668 epig() { ugrep --color=never -Q -E "$@"; }
 669 
 670 # Extended Plain Recursive Interactive Grep
 671 eprig() { ugrep -r --color=never -Q -E "$@"; }
 672 
 673 # Evaluate Ruby expression
 674 er() { ruby -e "puts ${1:-nil}"; }
 675 
 676 # ignore/remove all matched regexes given on all stdin lines
 677 erase() {
 678     awk '
 679         BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
 680         {
 681             for (i = 1; i < ARGC; i++) gsub(e[i], "")
 682             print; fflush()
 683         }
 684     ' "${@:-\r$}"
 685 }
 686 
 687 # Editor Read-Only
 688 ero() { micro -readonly true "$@"; }
 689 
 690 # Extended-mode Sed, enabling its full regex syntax
 691 es() { sed -E -u "$@"; }
 692 
 693 # convert EURos into CAnadian Dollars, using the latest official exchange
 694 # rates from the bank of canada; during weekends, the latest rate may be
 695 # from a few days ago; the default amount of euros to convert is 1, when
 696 # not given
 697 eur2cad() {
 698     local site='https://www.bankofcanada.ca/valet/observations/group'
 699     local csv_rates="${site}/FX_RATES_DAILY/csv"
 700     local url
 701     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
 702     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
 703         /EUR/ { for (i = 1; i <= NF; i++) if($i ~ /EUR/) j = i }
 704         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
 705 }
 706 
 707 # EValuate AWK expression
 708 evawk() {
 709     local expr="${1:-0}"
 710     [ $# -gt 0 ] && shift
 711     awk "BEGIN { print ${expr}; exit }" "$@"
 712 }
 713 
 714 # convert fahrenheit into celsius
 715 fahrenheit() {
 716     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' |
 717         awk '/./ { printf "%.2f\n", ($0 - 32) * 5.0/9.0 }'
 718 }
 719 
 720 # Flushed AWK
 721 fawk() { stdbuf -oL awk "$@"; }
 722 
 723 # fetch/web-request all URIs given, using protocol HTTPS when none is given
 724 fetch() {
 725     local a
 726     for a in "$@"; do
 727         case "$a" in
 728             file://*|https://*|http://*) curl --show-error -s "$a";;
 729             ftp://*|ftps://*|sftp://*) curl --show-error -s "$a";;
 730             dict://*|telnet://*) curl --show-error -s "$a";;
 731             data:*) echo "$a" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
 732             *) curl --show-error -s "https://$a";;
 733         esac
 734     done
 735 }
 736 
 737 # run the Fuzzy Finder (fzf) in multi-choice mode, with custom keybindings
 738 ff() { fzf -m --bind ctrl-a:select-all,ctrl-space:toggle "$@"; }
 739 
 740 # show all files in a folder, digging recursively
 741 files() {
 742     local arg
 743     for arg in "${@:-.}"; do
 744         if [ ! -d "${arg}" ]; then
 745             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 746             return 1
 747         fi
 748         stdbuf -oL find "${arg}" -type f
 749     done
 750 }
 751 
 752 # recursively find all files with fewer bytes than the number given
 753 filesunder() {
 754     local n
 755     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
 756     [ $# -gt 0 ] && shift
 757 
 758     local arg
 759     for arg in "${@:-.}"; do
 760         if [ ! -d "${arg}" ]; then
 761             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 762             return 1
 763         fi
 764         stdbuf -oL find "${arg}" -type f -size -"$n"c
 765     done
 766 }
 767 
 768 # get the first n lines, or 1 by default
 769 first() { head -n "${1:-1}" "${2:--}"; }
 770 
 771 # limit data up to the first n bytes
 772 firstbytes() { head -c "$1" "${2:--}"; }
 773 
 774 # get the first n lines, or 1 by default
 775 firstlines() { head -n "${1:-1}" "${2:--}"; }
 776 
 777 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
 778 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
 779 # and ensuring each input's last line ends with a line-feed
 780 fixlines() {
 781     awk '
 782         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
 783         { gsub(/\r$/, ""); print; fflush() }
 784     ' "$@"
 785 }
 786 
 787 # FLushed AWK
 788 # flawk() { stdbuf -oL awk "$@"; }
 789 
 790 # First Line AWK emits the first line as is, injects its first argument as
 791 # extra AWK code which handles all later lines, and passes any remaining
 792 # arguments to `awk` as given
 793 flawk() {
 794     local code="${1:-1}"
 795     [ $# -gt 0 ] && shift
 796     stdbuf -oL awk "NR == 1 { print; fflush(); next } ${code}" "$@"
 797 }
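
     # usage sketch (assuming typical `ps` output): keep the header line, then
     # only show the lines mentioning bash
     #   ps aux | flawk '/bash/'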
 798 
 799 # Faint LEAK emits/tees input both to stdout and stderr, coloring gray what
 800 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
 801 # involving several steps
 802 fleak() {
 803     awk '
 804         {
 805             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
 806             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0 > "/dev/stderr"
 807             print; fflush()
 808         }
 809     ' "$@"
 810 }
 811 
 812 # try to run the command given using line-buffering for its (standard) output
 813 flushlines() { stdbuf -oL "$@"; }
 814 
 815 # show all folders in a folder, digging recursively
 816 folders() {
 817     local arg
 818     for arg in "${@:-.}"; do
 819         if [ ! -d "${arg}" ]; then
 820             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
 821             return 1
 822         fi
 823         stdbuf -oL find "${arg}" -type d | awk '!/^\.$/ { print; fflush() }'
 824     done
 825 }
 826 
 827 # start from the line number given, skipping all previous ones
 828 fromline() { tail -n +"${1:-1}" "${2:--}"; }
 829 
 830 # convert FeeT into meters
 831 ft() {
 832     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 833         awk '/./ { printf "%.2f\n", 0.3048 * $0; fflush() }'
 834 }
 835 
 836 # convert FeeT² (squared) into meters²
 837 ft2() {
 838     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 839         awk '/./ { printf "%.2f\n", 0.09290304 * $0 }'
 840 }
 841 
 842 # Get/fetch data from the filenames/URIs given; uses my script `get`
 843 # g() { get "$@"; }
 844 
 845 # run `grep` in extended-regex mode, enabling its full regex syntax
 846 # g() { grep -E --line-buffered "$@"; }
 847 
 848 # convert GALlons into liters
 849 gal() {
 850     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 851         awk '/./ { printf "%.2f\n", 3.785411784 * $0; fflush() }'
 852 }
 853 
 854 # convert binary GigaBytes into bytes
 855 gb() {
 856     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
 857         awk '/./ { printf "%.4f\n", 1073741824 * $0; fflush() }' |
 858         sed 's-\.00*$--'
 859 }
 860 
 861 # Good, Bad, Meh colors lines using up to 3 regular expressions
 862 gbm() {
 863     local good="$1"
 864     local bad="$2"
 865     local meh="$3"
 866     [ $# -gt 0 ] && shift
 867     [ $# -gt 0 ] && shift
 868     [ $# -gt 0 ] && shift
 869 
 870     awk '
 871         BEGIN {
 872             gotgood = ARGC > 1 && ARGV[1] != ""
 873             gotbad = ARGC > 2 && ARGV[2] != ""
 874             gotmeh = ARGC > 3 && ARGV[3] != ""
 875             good = ARGV[1]
 876             bad = ARGV[2]
 877             meh = ARGV[3]
 878             delete ARGV[1]
 879             delete ARGV[2]
 880             delete ARGV[3]
 881         }
 882 
 883         gotgood && $0 ~ good {
 884             # code to use a color-blind-friendlier blue, instead of green
 885             # gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;0;95;215m")
 886             # printf "\x1b[38;2;0;95;215m%s\x1b[0m\n", $0
 887             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;0;135;95m")
 888             printf "\x1b[38;2;0;135;95m%s\x1b[0m\n", $0; fflush()
 889             next
 890         }
 891 
 892         gotbad && $0 ~ bad {
 893             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;204;0;0m")
 894             printf "\x1b[38;2;204;0;0m%s\x1b[0m\n", $0; fflush()
 895             next
 896         }
 897 
 898         gotmeh && $0 ~ meh {
 899             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m")
 900             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush()
 901             next
 902         }
 903 
 904         { print; fflush() }
 905     ' "${good}" "${bad}" "${meh}" "$@"
 906 }
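
     # usage sketch, mirroring how `cgt` above uses it on `go test` output:
     #   go test ./... 2>&1 | gbm '^ok' '^[-]* ?FAIL' '^\?'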
 907 
 908 # glue/stick together various lines, only emitting a line-feed at the end; an
 909 # optional argument is the output-item-separator, which is empty by default
 910 glue() {
 911     local sep="${1:-}"
 912     [ $# -gt 0 ] && shift
 913     awk -v sep="${sep}" '
 914         NR > 1 { printf "%s", sep }
 915         { gsub(/\r/, ""); printf "%s", $0; fflush() }
 916         END { if (NR > 0) print ""; fflush() }
 917     ' "$@"
 918 }
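
     # usage sketch (made-up input):
     #   seq 3 | glue ', '   # emits "1, 2, 3" on a single line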
 919 
 920 # GO Build Stripped: a common use-case for the go compiler
 921 gobs() { go build -ldflags "-s -w" -trimpath "$@"; }
 922 
 923 # GO DEPendencieS: show all dependencies in a go project
 924 godeps() { go list -f '{{ join .Deps "\n" }}' "$@"; }
 925 
 926 # GO IMPortS: show all imports in a go project
 927 goimps() { go list -f '{{ join .Imports "\n" }}' "$@"; }
 928 
 929 # go to the folder picked using an interactive TUI; uses my script `bf`
 930 goto() {
 931     local where
 932     where="$(bf "${1:-.}")"
 933     if [ $? -ne 0 ]; then
 934         return 0
 935     fi
 936 
 937     where="$(realpath "${where}")"
 938     if [ ! -d "${where}" ]; then
 939         where="$(dirname "${where}")"
 940     fi
 941     cd "${where}" || return
 942 }
 943 
 944 # GRayed-out lines with AWK
 945 grawk() {
 946     local cond="${1:-1}"
 947     [ $# -gt 0 ] && shift
 948     awk "${cond}"' {
 949             gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m")
 950             printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush()
 951             next
 952         }
 953         { print; fflush() }
 954     ' "$@"
 955 }
 956 
 957 # Style lines using a GRAY-colored BACKground
 958 grayback() {
 959     awk '
 960         {
 961             gsub(/\x1b\[0m/, "\x1b[0m\x1b[48;2;218;218;218m")
 962             printf "\x1b[48;2;218;218;218m%s\x1b[0m\n", $0; fflush()
 963         }
 964     ' "$@"
 965 }
 966 
 967 # Grep, Recursive Interactive and Plain
 968 # grip() { ugrep -r -Q --color=never -E "$@"; }
 969 
 970 # Global extended regex SUBstitute, using the AWK function of the same name:
 971 # arguments are used as regex/replacement pairs, in that order
 972 gsub() {
 973     awk '
 974         BEGIN {
 975             for (i = 1; i < ARGC; i++) {
 976                 args[++n] = ARGV[i]
 977                 delete ARGV[i]
 978             }
 979         }
 980         {
 981             for (i = 1; i <= n; i += 2) gsub(args[i], args[i + 1])
 982             print; fflush()
 983         }
 984     ' "$@"
 985 }
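
     # usage sketch (made-up input): pairs are applied in the order given
     #   echo 'a-b-c' | gsub '-' '+' 'c' 'z'   # emits "a+b+z"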
 986 
 987 # Highlight (lines) with AWK
 988 hawk() {
 989     local cond="${1:-1}"
 990     [ $# -gt 0 ] && shift
 991     awk '
 992         { low = lower = tolower($0) }
 993         '"${cond}"' {
 994             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
 995             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
 996             next
 997         }
 998         { print; fflush() }
 999     ' "$@"
1000 }
1001 
1002 # play a heartbeat-like sound lasting the number of seconds given, or for 1
1003 # second by default; uses my script `waveout`
1004 heartbeat() {
1005     local a='sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1])'
1006     local b='((12, u), (8, (u-0.25)%1))'
1007     local f="sum($a for v in $b) / 2"
1008     # local f='sum(sin(10*tau*exp(-20*v))*exp(-2*v) for v in (u, (u-0.25)%1))/2'
1009     # local f='sum(sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1]) for v in ((12, u), (8, (u-0.25)%1)))/2'
1010     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1011 }
1012 
1013 # Highlighted-style ECHO
1014 hecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1015 
1016 # show each byte as a pair of HEXadecimal (base-16) symbols
1017 hexify() {
1018     cat "$@" | od -x -A n |
1019         awk '{ gsub(/ +/, ""); printf "%s", $0; fflush() } END { printf "\n" }'
1020 }
1021 
1022 # HIghlighted-style ECHO
1023 hiecho() { printf "\e[7m%s\e[0m\n" "$*"; }
1024 
1025 # highlight lines
1026 highlight() {
1027     awk '
1028         {
1029             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1030             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1031         }
1032     ' "$@"
1033 }
1034 
1035 # HIghlight LEAK emits/tees input both to stdout and stderr, highlighting what
1036 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes
1037 # involving several steps
1038 hileak() {
1039     awk '
1040         {
1041             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")
1042             printf "\x1b[7m%s\x1b[0m\n", $0 > "/dev/stderr"
1043             print; fflush()
1044         }
1045     ' "$@"
1046 }
1047 
1048 # highlight lines
1049 hilite() {
1050     awk '
1051         {
1052             gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m")
1053             printf "\x1b[7m%s\x1b[0m\n", $0; fflush()
1054         }
1055     ' "$@"
1056 }
1057 
1058 # Help Me Remember my custom shell commands
1059 hmr() {
1060     local cmd="bat"
1061     # debian linux uses a different name for the `bat` app
1062     if [ -e "/usr/bin/batcat" ]; then
1063         cmd="batcat"
1064     fi
1065 
1066     "$cmd" \
1067         --style=plain,header,numbers --theme='Monokai Extended Light' \
1068         --wrap=never --color=always "$(which clam)" |
1069             sed -u 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' | less -JMKiCRS
1070 }
1071 
1072 # convert seconds into a colon-separated Hours-Minutes-Seconds triple
1073 hms() {
1074     echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | awk '/./ {
1075         x = $0
1076         h = (x - x % 3600) / 3600
1077         m = (x % 3600) / 60
1078         s = x % 60
1079         printf "%02d:%02d:%05.2f\n", h, m, s; fflush()
1080     }'
1081 }
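
     # usage sketch:
     #   hms 3661.5   # emits 01:01:01.50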
1082 
1083 # find all hyperlinks inside HREF attributes in the input text
1084 href() {
1085     awk '
1086         BEGIN { e = "href=\"[^\"]+\"" }
1087         {
1088             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1089                 print substr(s, RSTART + 6, RLENGTH - 7); fflush()
1090             }
1091         }
1092     ' "$@"
1093 }
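
     # usage sketch (made-up input):
     #   echo '<a href="https://example.com">link</a>' | href   # emits https://example.com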
1094 
1095 # Index all lines starting from 0, using a tab right after each line number
1096 # i() {
1097 #     local start="${1:-0}"
1098 #     [ $# -gt 0 ] && shift
1099 #     nl -b a -w 1 -v "${start}" "$@"
1100 # }
1101 
1102 # Index all lines starting from 0, using a tab right after each line number
1103 i() { stdbuf -oL nl -b a -w 1 -v 0 "$@"; }
1104 
1105 # avoid/ignore lines which case-insensitively match any of the regexes given
1106 iavoid() {
1107     awk '
1108         BEGIN {
1109             if (IGNORECASE == "") {
1110                 m = "this variant of AWK lacks case-insensitive regex-matching"
1111                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1112                 exit 125
1113             }
1114             IGNORECASE = 1
1115 
1116             for (i = 1; i < ARGC; i++) {
1117                 e[i] = ARGV[i]
1118                 delete ARGV[i]
1119             }
1120         }
1121 
1122         {
1123             for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
1124             print; fflush(); got++
1125         }
1126 
1127         END { exit(got == 0) }
1128     ' "${@:-^\r?$}"
1129 }
1130 
1131 # case-Insensitively DEDUPlicate prevents lines from appearing more than once
1132 idedup() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1133 
1134 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1135 idrop() {
1136     awk '
1137         BEGIN {
1138             if (IGNORECASE == "") {
1139                 m = "this variant of AWK lacks case-insensitive regex-matching"
1140                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1141                 exit 125
1142             }
1143             IGNORECASE = 1
1144 
1145             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1146         }
1147 
1148         {
1149             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1150             print; fflush()
1151         }
1152     ' "${@:-\r$}"
1153 }
1154 
1155 # ignore/remove all case-insensitively matched regexes given on all stdin lines
1156 ierase() {
1157     awk '
1158         BEGIN {
1159             if (IGNORECASE == "") {
1160                 m = "this variant of AWK lacks case-insensitive regex-matching"
1161                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1162                 exit 125
1163             }
1164             IGNORECASE = 1
1165 
1166             for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] }
1167         }
1168 
1169         {
1170             for (i = 1; i < ARGC; i++) gsub(e[i], "")
1171             print; fflush()
1172         }
1173     ' "${@:-\r$}"
1174 }
1175 
1176 # ignore a command in a pipe: this allows quick re-editing of pipes, while
1177 # still leaving previously-used steps in place, as a memo
1178 ignore() { cat; }
1179 
1180 # only keep lines which case-insensitively match any of the regexes given
1181 imatch() {
1182     awk '
1183         BEGIN {
1184             if (IGNORECASE == "") {
1185                 m = "this variant of AWK lacks case-insensitive regex-matching"
1186                 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr"
1187                 exit 125
1188             }
1189             IGNORECASE = 1
1190 
1191             for (i = 1; i < ARGC; i++) {
1192                 e[i] = ARGV[i]
1193                 delete ARGV[i]
1194             }
1195         }
1196 
1197         {
1198             for (i = 1; i < ARGC; i++) {
1199                 if ($0 ~ e[i]) {
1200                     print; fflush()
1201                     got++
1202                     next
1203                 }
1204             }
1205         }
1206 
1207         END { exit(got == 0) }
1208     ' "${@:-[^\r]}"
1209 }
1210 
1211 # emit each word-like item from each input line on its own line
1212 items() { awk '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"; }
1213 
1214 # case-insensitively deduplicate lines, keeping them in their original order:
1215 # the checking/matching is case-insensitive, but each first match is output
1216 # exactly as is
1217 iunique() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }
1218 
1219 # shrink/compact Json data, allowing an optional filepath
1220 # j0() { python -m json.tool --compact "${1:--}"; }
1221 
1222 # shrink/compact Json using the `jq` app, allowing an optional filepath, and
1223 # even an optional transformation formula after that
1224 # j0() { jq -c -M "${2:-.}" "${1:--}"; }
1225 
1226 # show Json data on multiple lines, using 2 spaces for each indentation level,
1227 # allowing an optional filepath
1228 # j2() { python -m json.tool --indent 2 "${1:--}"; }
1229 
1230 # show Json data on multiple lines, using 2 spaces for each indentation level,
1231 # allowing an optional filepath, and even an optional transformation formula
1232 # after that
1233 # j2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1234 
1235 # listen to streaming JAZZ music
1236 jazz() {
1237     printf "streaming \e[7mSmooth Jazz Instrumental\e[0m\n"
1238     # mpv https://stream.zeno.fm/00rt0rdm7k8uv
1239     mpv --quiet https://stream.zeno.fm/00rt0rdm7k8uv
1240 }
1241 
1242 # show a `dad` JOKE from the web, sometimes even a very funny one
1243 joke() {
1244     curl -s https://icanhazdadjoke.com | fold -s | sed -u -E 's- *\r?$--'
1245     # plain-text output from previous cmd doesn't end with a line-feed
1246     printf "\n"
1247 }
1248 
1249 # shrink/compact JSON data, allowing an optional filepath
1250 # json0() { python -m json.tool --compact "${1:--}"; }
1251 
1252 # shrink/compact JSON using the `jq` app, allowing an optional filepath, and
1253 # even an optional transformation formula after that
1254 json0() { jq -c -M "${2:-.}" "${1:--}"; }
1255 
1256 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1257 # allowing an optional filepath
1258 # json2() { python -m json.tool --indent 2 "${1:--}"; }
1259 
1260 # show JSON data on multiple lines, using 2 spaces for each indentation level,
1261 # allowing an optional filepath, and even an optional transformation formula
1262 # after that
1263 json2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }
1264 
1265 # turn JSON Lines into a proper JSON array
1266 jsonl2json() { jq -s -M "${@:-.}"; }
1267 
1268 # emit the given number of random/junk bytes, or 1024 junk bytes by default
1269 junk() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" /dev/urandom; }
1270 
1271 # only keep the file-extension part from lines ending with file-extensions
1272 # justext() {
1273 #     awk '
1274 #         !/^\./ && /\./ { gsub(/^.+\.+/, ""); printf ".%s\n", $0; fflush() }
1275 #     ' "$@"
1276 # }
1277 
1278 # only keep the file-extension part from lines ending with file-extensions
1279 justext() {
1280     awk '
1281         !/^\./ && /\./ {
1282             if (match($0, /((\.[A-Za-z0-9]+)+) *\r?$/)) {
1283                 print substr($0, RSTART, RLENGTH); fflush()
1284             }
1285         }
1286     ' "$@"
1287 }
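
     # usage sketch (made-up filenames):
     #   printf 'notes.txt\narchive.tar.gz\nREADME\n' | justext
     # emits ".txt" and ".tar.gz", skipping the extension-less line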
1288 
1289 # only keep lines ending with a file-extension of any popular picture format
1290 justpictures() {
1291     awk '
1292         /.\.(bmp|gif|heic|ico|jfif|jpe?g|png|svg|tiff?|webp) *\r?$/ {
1293             gsub(/ *\r?$/, ""); print; fflush()
1294         }
1295     ' "$@"
1296 }
1297 
1298 # only keep lines ending with a file-extension of any popular sound format
1299 justsounds() {
1300     awk '
1301         /.\.(aac|aif[cf]?|au|flac|m4a|m4b|mp[23]|ogg|snd|wav|wma) *\r?$/ {
1302             gsub(/ *\r?$/, ""); print; fflush()
1303         }
1304     ' "$@"
1305 }
1306 
1307 # only keep lines ending with a file-extension of any popular video format
1308 justvideos() {
1309     awk '
1310         /.\.(avi|mkv|mov|mp4|mpe?g|ogv|webm|wmv) *\r?$/ {
1311             gsub(/ *\r?$/, ""); print; fflush()
1312         }
1313     ' "$@"
1314 }
1315 
1316 # convert binary KiloBytes into bytes
1317 kb() {
1318     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1319         awk '/./ { printf "%.2f\n", 1024 * $0; fflush() }' |
1320         sed 's-\.00*$--'
1321 }
1322 
1323 # run `less`, showing line numbers, among other settings
1324 l() { less -JMKNiCRS "$@"; }
1325 
1326 # Like A Book groups lines as 2 side-by-side pages, the same way books
1327 # do it; uses my script `book`
1328 lab() { book "$(($(tput lines) - 1))" "$@" | less -JMKiCRS; }
1329 
1330 # find the LAN (local-area network) IP address for this device
1331 lanip() { hostname -I; }
1332 
1333 # Line xARGS: `xargs` using line separators, which handles filepaths
1334 # with spaces, as long as the standard input has 1 path per line
1335 largs() { xargs -d '\n' "$@"; }
1336 
1337 # get the last n lines, or 1 by default
1338 # last() { tail -n "${1:-1}" "${2:--}"; }
1339 
1340 # get up to the last given number of bytes
1341 lastbytes() { tail -c "${1:-1}" "${2:--}"; }
1342 
1343 # get the last n lines, or 1 by default
1344 lastlines() { tail -n "${1:-1}" "${2:--}"; }
1345 
1346 # turn UTF-8 into its latin-like subset, where variants of latin letters stay
1347 # as given, and where all other symbols become question marks, one question
1348 # mark for each code-point byte
1349 latinize() {
1350     iconv -f utf-8 -t latin-1//translit "$@" | iconv -f latin-1 -t utf-8
1351 }
1352 
1353 # Lowercased (lines) AWK
1354 lawk() {
1355     local code="${1:-1}"
1356     [ $# -gt 0 ] && shift
1357     awk "
1358         {
1359             line = orig = original = \$0
1360             low = lower = tolower(\$0)
1361             \$0 = lower
1362         }
1363         ${code}
1364         { fflush() }
1365     " "$@";
1366 }
1367 
1368 # convert pounds (LB) into kilograms
1369 lb() {
1370     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1371         awk '/./ { printf "%.2f\n", 0.45359237 * $0; fflush() }'
1372 }
1373 
1374 # turn the first n space-separated fields on each line into tab-separated
1375 # ones; this behavior is useful to make the output of many cmd-line tools
1376 # into TSV, since filenames are usually the last fields, and these may
1377 # contain spaces which aren't meant to be split into different fields
1378 leadtabs() {
1379     local n="$1"
1380     local cmd="$([ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "")"
1381     cmd="s-^ *--; s- *\\r?\$--; $(echo "${cmd}" | sed 's/ /s- +-\\t-1;/g')"
1382     sed -u -E "${cmd}"
1383 }
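
     # usage sketch, assuming typical `ls -l` output with 8 space-separated
     # fields before each filename:
     #   ls -l | leadtabs 8   # tab-separates those fields, keeping spaces in names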
1384 
1385 # run `less`, showing line numbers, among other settings
1386 least() { less -JMKNiCRS "$@"; }
1387 
1388 # limit stops at the first n bytes, or 1024 bytes by default
1389 limit() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" "${2:--}"; }
1390 
1391 # Less with Header runs `less` with line numbers, ANSI styles, no line-wraps,
1392 # and using the first n lines as a sticky-header (1 by default), so they
1393 # always show on top
1394 lh() {
1395     local n="${1:-1}"
1396     [ $# -gt 0 ] && shift
1397     less --header="$n" -JMKNiCRS "$@"
1398 }
1399 
1400 # fix lines, ignoring leading UTF-8 BOMs (byte-order-marks) on each input's
1401 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
1402 # and ensuring each input's last line ends with a line-feed
1403 lines() {
1404     awk '
1405         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1406         { gsub(/\r$/, ""); print; fflush() }
1407     ' "$@"
1408 }
1409 
1410 # regroup adjacent lines into n-item tab-separated lines
1411 lineup() {
1412     local n="${1:-0}"
1413     [ $# -gt 0 ] && shift
1414 
1415     if [ "$n" -le 0 ]; then
1416         awk '
1417             NR > 1 { printf "\t" }
1418             { printf "%s", $0; fflush() }
1419             END { if (NR > 0) print "" }
1420         ' "$@"
1421         return $?
1422     fi
1423 
1424     awk -v n="$n" '
1425         NR % n != 1 && n > 1 { printf "\t" }
1426         { printf "%s", $0; fflush() }
1427         NR % n == 0 { print ""; fflush() }
1428         END { if (NR % n != 0) print "" }
1429     ' "$@"
1430 }
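
     # usage sketch (made-up input):
     #   seq 6 | lineup 3   # emits "1<TAB>2<TAB>3", then "4<TAB>5<TAB>6"
     #   seq 6 | lineup     # joins all lines into a single tab-separated line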
1431 
1432 # find all hyperLINKS (https:// and http://) in the input text
1433 links() {
1434     awk '
1435         BEGIN { e = "https?://[A-Za-z0-9+_.:%-]+(/[A-Za-z0-9+_.%/,#?&=-]*)*" }
1436         {
1437             # match all links in the current line
1438             for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
1439                 print substr(s, RSTART, RLENGTH); fflush()
1440             }
1441         }
1442     ' "$@"
1443 }
1444 
1445 # List files, using the `Long` option
1446 # ll() { ls -l "$@"; }
1447 
1448 # LOAD data from the filename or URI given; uses my script `get`
1449 load() { get "$@"; }
1450 
1451 # LOwercase line, check (awk) COndition: on each success, the original line
1452 # is output with its original letter-casing, as its lower-cased version is
1453 # only a convenience meant for the condition
1454 loco() {
1455     local cond="${1:-1}"
1456     [ $# -gt 0 ] && shift
1457     awk "
1458         {
1459             line = orig = original = \$0
1460             low = lower = tolower(\$0)
1461             \$0 = lower
1462         }
1463         ${cond} { print line; fflush() }
1464     " "$@"
1465 }
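
     # usage sketch (made-up input): the condition sees lowercased lines, but
     # matches are emitted with their original casing
     #   printf 'Makefile\nREADME\n' | loco '/readme/'   # emits "README"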
1466 
1467 # LOcal SERver webserves files in a folder as localhost, using the port
1468 # number given, or port 8080 by default
1469 loser() {
1470     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
1471     python3 -m http.server "${1:-8080}" -d "${2:-.}"
1472 }
1473 
1474 # LOWercase all ASCII symbols
1475 low() { awk '{ print tolower($0); fflush() }' "$@"; }
1476 
1477 # LOWERcase all ASCII symbols
1478 lower() { awk '{ print tolower($0); fflush() }' "$@"; }
1479 
1480 # Live/Line-buffered RipGrep ensures results show/pipe up immediately
1481 lrg() { rg --line-buffered "$@"; }
1482 
1483 # Listen To Youtube
1484 lty() {
1485     local url
1486     # some youtube URIs end with extra playlist/tracker parameters
1487     url="$(echo "$1" | sed 's-&.*--')"
1488     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
1489 }
1490 
1491 # Match lines with any of the regexes given
1492 m() {
1493     awk '
1494         BEGIN {
1495             for (i = 1; i < ARGC; i++) {
1496                 e[i] = ARGV[i]
1497                 delete ARGV[i]
1498             }
1499         }
1500 
1501         {
1502             for (i = 1; i < ARGC; i++) {
1503                 if ($0 ~ e[i]) {
1504                     print; fflush()
1505                     got++
1506                     next
1507                 }
1508             }
1509         }
1510 
1511         END { exit(got == 0) }
1512     ' "${@:-[^\r]}"
1513 }
1514 
1515 # only keep lines which match any of the regexes given
1516 match() {
1517     awk '
1518         BEGIN {
1519             for (i = 1; i < ARGC; i++) {
1520                 e[i] = ARGV[i]
1521                 delete ARGV[i]
1522             }
1523         }
1524 
1525         {
1526             for (i = 1; i < ARGC; i++) {
1527                 if ($0 ~ e[i]) {
1528                     print; fflush()
1529                     got++
1530                     next
1531                 }
1532             }
1533         }
1534 
1535         END { exit(got == 0) }
1536     ' "${@:-[^\r]}"
1537 }
1538 
1539 # MAX Width truncates lines to the given maximum width, or to 80 by
1540 # default; output lines end with an ANSI reset-code, in case input
1541 # lines use ANSI styles
1542 maxw() {
1543     local maxwidth="${1:-80}"
1544     [ $# -gt 0 ] && shift
1545     awk -v maxw="${maxwidth}" '
1546         {
1547             gsub(/\r$/, "")
1548             printf("%s\x1b[0m\n", substr($0, 1, maxw)); fflush()
1549         }
1550     ' "$@"
1551 }
1552 
1553 # convert binary MegaBytes into bytes
1554 mb() {
1555     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1556         awk '/./ { printf "%.2f\n", 1048576 * $0; fflush() }' |
1557         sed 's-\.00*$--'
1558 }
1559 
1560 # Multi-Core MAKE runs `make` using all cores
1561 mcmake() { make -j "$(nproc)" "$@"; }
1562 
1563 # Multi-Core MaKe runs `make` using all cores
1564 mcmk() { make -j "$(nproc)" "$@"; }
1565 
1566 # merge stderr into stdout, without any ugly keyboard-dancing
1567 # merrge() { "$@" 2>&1; }
1568 
1569 # convert MIles into kilometers
1570 mi() {
1571     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1572         awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }'
1573 }
1574 
1575 # convert MIles² (squared) into kilometers²
1576 mi2() {
1577     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1578         awk '/./ { printf "%.2f\n", 2.5899881103360 * $0 }'
1579 }
1580 
1581 # Make In Folder
1582 mif() {
1583     local code
1584     pushd "${1:-.}" > /dev/null || return
1585     [ $# -gt 0 ] && shift
1586     make "$@"
1587     code=$?
1588     popd > /dev/null || return "${code}"
1589     return "${code}"
1590 }
1591 
1592 # Media INFO
1593 # minfo() { mediainfo "$@" | less -JMKiCRS; }
1594 
1595 # Media INFO
1596 # minfo() { ffprobe "$@" |& less -JMKiCRS; }
1597 
1598 # quick alias for `make`
1599 # mk() { make "$@"; }
1600 
1601 # run `make`
1602 mk() { make "$@"; }
1603 
1604 # convert Miles Per Hour into kilometers per hour
1605 mph() {
1606     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1607         awk '/./ { printf "%.2f\n", 1.609344 * $0 }'
1608 }
1609 
1610 # Number all lines, using a tab right after each line number
1611 # n() {
1612 #     local start="${1:-1}"
1613 #     [ $# -gt 0 ] && shift
1614 #     nl -b a -w 1 -v "${start}" "$@"
1615 # }
1616 
1617 # Number all lines, using a tab right after each line number
1618 n() { stdbuf -oL nl -b a -w 1 -v 1 "$@"; }
1619 
1620 # Not AND sorts its 2 inputs, then finds lines not in common
1621 nand() {
1622     # comm -3 <(sort "$1") <(sort "$2")
1623     # dash doesn't support the process-sub syntax
1624     (sort "$1" | (sort "$2" | (comm -3 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
1625 }
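
     # usage sketch (hypothetical filenames): emit lines unique to either file
     #   nand old-list.txt new-list.txt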
1626 
1627 # Nice Byte Count, using my scripts `nn` and `cext`
1628 nbc() { wc -c "$@" | nn --gray | cext; }
1629 
1630 # NIce(r) COlumns makes the output of many commands whose output starts with
1631 # a header line easier to read; uses my script `nn`
1632 nico() {
1633     awk '
1634         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1635         { printf "%5d  %s\n", NR - 1, $0; fflush() }
1636     ' "$@" | nn --gray | less -JMKiCRS
1637 }
1638 
1639 # emit nothing to output and/or discard everything from input
1640 nil() {
1641     if [ $# -gt 0 ]; then
1642         "$@" > /dev/null
1643     else
1644         cat < /dev/null
1645     fi
1646 }
1647 
1648 # pipe-run my scripts `nj` (Nice Json) and `nn` (Nice Numbers)
1649 njnn() { nj "$@" | nn --gray; }
1650 
1651 # NArrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1652 naman() {
1653     local w
1654     w="$(tput cols)"
1655     if [ "$w" -gt 120 ]; then
1656         w="$((w / 2 - 1))"
1657     fi
1658     MANWIDTH="$w" man "$@"
1659 }
1660 
1661 # Narrow MANual, keeps `man` narrow, even if the window/tab is wide when run
1662 nman() {
1663     local w
1664     w="$(tput cols)"
1665     if [ "$w" -gt 120 ]; then
1666         w="$((w / 2 - 1))"
1667     fi
1668     MANWIDTH="$w" man "$@"
1669 }
1670 
1671 # convert Nautical MIles into kilometers
1672 nmi() {
1673     echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
1674         awk '/./ { printf "%.2f\n", 1.852 * $0; fflush() }'
1675 }
1676 
1677 # NO (standard) ERRor ignores stderr, without any ugly keyboard-dancing
1678 # noerr() { "$@" 2> /dev/null; }
1679 
1680 # play a white-noise sound lasting the number of seconds given, or for 1
1681 # second by default; uses my script `waveout`
1682 noise() { waveout "${1:-1}" "${2:-0.05} * random()" | mpv --really-quiet -; }
1683 
1684 # show the current date and time
1685 now() { date +'%Y-%m-%d %H:%M:%S'; }
1686 
1687 # Nice Processes shows/lists all current processes; uses my script `nn`
1688 np() {
1689     local res
1690     local code
1691     # res="$(ps "${@:-auxf}")"
1692     res="$(ps "${@:-aux}")"
1693     code=$?
1694     if [ "${code}" -ne 0 ]; then
1695         return "${code}"
1696     fi
1697 
1698     echo "${res}" | awk '
1699         BEGIN {
1700             d = strftime("%a %b %d")
1701             t = strftime("%H:%M:%S")
1702             # printf "%s  %s\n\n", d, t
1703             # printf "\x1b[32m%s\x1b[0m  \x1b[34m%s\x1b[0m\n\n", d, t
1704             # printf "%30s\x1b[32m%s\x1b[0m  \x1b[34m%s\x1b[0m\n\n", "", d, t
1705             # printf "%30s%s  %s\n\n", "", d, t
1706             printf "\x1b[7m%30s%s  %s%30s\x1b[0m\n\n", "", d, t, ""
1707         }
1708 
1709         (NR - 1) % 5 == 1 && NR > 1 { print "" }
1710 
1711         $1 == "root" {
1712             # gsub(/^/, "\x1b[36m")
1713             # gsub(/\x1b\[0m/, "\x1b[0m\x1b[36m")
1714             gsub(/^/, "\x1b[34m")
1715             gsub(/ +/, "&\x1b[0m\x1b[34m")
1716             gsub(/$/, "\x1b[0m")
1717         }
1718 
1719         {
1720             gsub(/ \? /, "\x1b[38;2;135;135;175m&\x1b[0m")
1721             gsub(/0[:\.]00*/, "\x1b[38;2;135;135;175m&\x1b[0m")
1722             printf "%3d  %s\n", NR - 1, $0
1723         }
1724     ' | nn --gray | less -JMKiCRS
1725 }
1726 
1727 # Nice Size, using my scripts `nn` and `cext`
1728 ns() { wc -c "$@" | nn --gray | cext; }
1729 
1730 # Nice Transform Json, using my scripts `tj`, and `nj`
1731 ntj() { tj "$@" | nj; }
1732 
1733 # Nice TimeStamp
1734 nts() {
1735     ts '%Y-%m-%d %H:%M:%S' |
1736         sed -u 's-^-\x1b[48;2;218;218;218m\x1b[38;2;0;95;153m-; s- -\x1b[0m\t-2'
1737 }
1738 
1739 # emit nothing to output and/or discard everything from input
1740 null() {
1741     if [ $# -gt 0 ]; then
1742         "$@" > /dev/null
1743     else
1744         cat < /dev/null
1745     fi
1746 }
1747 
1748 # NULl-terminate LINES ends each stdin line with a null byte, instead of a
1749 # line-feed byte
1750 nullines() {
1751     awk '
1752         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
1753         { gsub(/\r$/, ""); printf "%s\x00", $0; fflush() }
1754     ' "$@"
1755 }
1756 
1757 # (Nice) What Are These (?) shows what the names given to it are/do, coloring
1758 # the syntax of shell functions
1759 nwat() {
1760     local a
1761 
1762     if [ $# -eq 0 ]; then
1763         printf "\e[38;2;204;0;0mnwat: no names given\e[0m\n" > /dev/stderr
1764         return 1
1765     fi
1766 
1767     local cmd="bat"
1768     # debian linux uses a different name for the `bat` app
1769     if [ -e "/usr/bin/batcat" ]; then
1770         cmd="batcat"
1771     fi
1772 
1773     for a in "$@"; do
1774         # printf "\e[7m%-80s\e[0m\n" "$a"
1775         printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
1776 
1777         # resolve 1 alias level
1778         if alias "$a" 2> /dev/null > /dev/null; then
1779             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
1780         fi
1781 
1782         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
1783             # resolved aliases with args/spaces in them would otherwise fail
1784             echo "$a"
1785         elif whence -f "$a" > /dev/null 2> /dev/null; then
1786             # zsh seems to show a shell function's code only via `whence -f`
1787             whence -f "$a"
1788         elif type "$a" > /dev/null 2> /dev/null; then
1789             # dash doesn't support `declare`, and `type` in bash emits
1790             # a redundant first output line, when it's a shell function
1791             type "$a" | awk '
1792                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
1793                 { print; fflush() }
1794                 END { if (NR < 2 && skipped) print skipped }
1795             ' | "$cmd" -l sh --style=plain --theme='Monokai Extended Light' \
1796                 --wrap=never --color=always |
1797                     sed -u 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g'
1798         else
1799             printf "\e[38;2;204;0;0m%s not found\e[0m\n" "$a"
1800         fi
1801     done | less -JMKiCRS
1802 }
1803 
1804 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1805 # alternating styles to make long numbers easier to read
1806 # nwc() { wc "$@" | nn --gray; }
1807 
1808 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
1809 # alternating styles to make long numbers easier to read
1810 # nwc() { wc "$@" | nn --gray | awk '{ printf "%5d %s\n", NR, $0; fflush() }'; }
1811 
1812 # Nice Word-Count runs `wc` and colors results, using my scripts `nn` and
1813 # `cext`, alternating styles to make long numbers easier to read
1814 nwc() {
1815     wc "$@" | sort -rn | nn --gray | cext |
1816         awk '{ printf "%5d %s\n", NR - 1, $0; fflush() }'
1817 }
1818 
1819 # Nice Zoom Json, using my scripts `zj`, and `nj`
1820 nzj() { zj "$@" | nj; }
1821 
1822 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1823 # pawk() { awk -F='' -v RS='' "$@"; }
1824 
1825 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
1826 pawk() { stdbuf -oL awk -F '\n' -v RS='' "$@"; }
1827 
1828 # Plain `fd`
1829 pfd() { fd --color=never "$@"; }
1830 
1831 # pick lines, using all the 1-based line-numbers given
1832 picklines() {
1833     awk '
1834         BEGIN { m = ARGC - 1; if (ARGC == 1) exit 0 }
1835         BEGIN { for (i = 1; i <= m; i++) { p[i] = ARGV[i]; delete ARGV[i] } }
1836         { l[++n] = $0 }
1837         END {
1838             for (i = 1; i <= m; i++) {
1839                 j = p[i]
1840                 if (j < 0) j += NR + 1
1841                 if (0 < j && j <= NR) print l[j]
1842             }
1843         }
1844     ' "$@"
1845 }
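# negative line-numbers count back from the end, so -1 picks the last line;
# for example
#   printf 'a\nb\nc\nd\n' | picklines 3 1 -1    # emits c, a, d on 3 lines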
1846 
1847 # Plain Interactive Grep
1848 pig() { ugrep --color=never -Q -E "$@"; }
1849 
1850 # make text plain, by ignoring ANSI terminal styling
1851 # plain() {
1852 #     awk '
1853 #         {
1854 #             gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers
1855 #             print; fflush()
1856 #         }
1857 #     ' "$@"
1858 # }
1859 
1860 # end all lines with an ANSI-code to reset styles
1861 plainend() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1862 
1863 # end all lines with an ANSI-code to reset styles
1864 plainends() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }
1865 
1866 # play audio/video media
1867 # play() { mplayer -msglevel all=-1 "${@:--}"; }
1868 
1869 # play audio/video media
1870 play() { mpv "${@:--}"; }
1871 
1872 # Pick LINE, using the 1-based line-number given
1873 pline() {
1874     local line="$1"
1875     [ $# -gt 0 ] && shift
1876     awk -v n="${line}" '
1877         BEGIN { if (n < 1) exit 0 }
1878         NR == n { print; exit 0 }
1879     ' "$@"
1880 }
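# for example, pick the 2nd line from stdin
#   printf 'a\nb\nc\n' | pline 2    # emits b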
1881 
1882 # Paused MPV; especially useful when trying to view pictures via `mpv`
1883 pmpv() { mpv --pause "${@:--}"; }
1884 
1885 # Print Python result
1886 pp() { python -c "print($1)"; }
1887 
1888 # PRecede (input) ECHO, prepends a first line to stdin lines
1889 precho() { echo "$@" && cat /dev/stdin; }
1890 
1891 # PREcede (input) MEMO, prepends a first highlighted line to stdin lines
1892 prememo() {
1893     awk '
1894         BEGIN {
1895             if (ARGC > 1) printf "\x1b[7m"
1896             for (i = 1; i < ARGC; i++) {
1897                 if (i > 1) printf " "
1898                 printf "%s", ARGV[i]
1899                 delete ARGV[i]
1900             }
1901             if (ARGC > 1) printf "\x1b[0m\n"
1902             fflush()
1903         }
1904         { print; fflush() }
1905     ' "$@"
1906 }
1907 
1908 # start by joining all arguments given as a tab-separated-items line of output,
1909 # followed by all lines from stdin verbatim
1910 pretsv() {
1911     awk '
1912         BEGIN {
1913             for (i = 1; i < ARGC; i++) {
1914                 if (i > 1) printf "\t"
1915                 printf "%s", ARGV[i]
1916                 delete ARGV[i]
1917             }
1918             if (ARGC > 1) printf "\n"
1919             fflush()
1920         }
1921         { print; fflush() }
1922     ' "$@"
1923 }
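# handy to give headerless TSV data a quick header line; for example
#   printf '1\t2\n' | pretsv alpha beta    # emits alpha<TAB>beta, then 1<TAB>2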
1924 
1925 # Plain Recursive Interactive Grep
1926 prig() { ugrep --color=never -r -Q -E "$@"; }
1927 
1928 # show/list all current processes
1929 processes() {
1930     local res
1931     res="$(ps aux)"
1932     echo "${res}" | awk 1 | sed -E -u \
1933         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' \
1934         -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1'
1935 }
1936 
1937 # Play Youtube Audio
1938 pya() {
1939     local url
1940     # some youtube URIs end with extra playlist/tracker parameters
1941     url="$(echo "$1" | sed 's-&.*--')"
1942     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
1943 }
1944 
1945 # Quiet ignores stderr, without any ugly keyboard-dancing
1946 q() { "$@" 2> /dev/null; }
1947 
1948 # Quiet MPV
1949 qmpv() { mpv --quiet "${@:--}"; }
1950 
1951 # ignore stderr, without any ugly keyboard-dancing
1952 quiet() { "$@" 2> /dev/null; }
1953 
1954 # Reset the screen, which empties it and resets the current style
1955 r() { reset; }
1956 
1957 # keep only lines between the 2 line numbers given, inclusively
1958 rangelines() {
1959     { [ "$#" -eq 2 ] || [ "$#" -eq 3 ]; } && [ "${1}" -le "${2}" ] &&
1960         { tail -n +"${1:-1}" "${3:--}" | head -n "$(("${2}" - "${1}" + 1))"; }
1961 }
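# for example, keep only lines 4 to 6 from stdin
#   seq 10 | rangelines 4 6    # emits 4, 5, 6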
1962 
1963 # RANdom MANual page
1964 ranman() {
1965     find "/usr/share/man/man${1:-1}" -type f | shuf -n 1 | xargs basename |
1966         sed 's-\.gz$--' | xargs man
1967 }
1968 
1969 # Run AWK expression
1970 rawk() {
1971     local expr="${1:-0}"
1972     [ $# -gt 0 ] && shift
1973     awk "BEGIN { print ${expr}; exit }" "$@"
1974 }
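# handy as a quick calculator with awk syntax; for example
#   rawk '2 ^ 10'         # emits 1024
#   rawk 'sqrt(2) / 2'    # emits 0.707107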
1975 
1976 # play a ready-phone-line sound lasting the number of seconds given, or for 1
1977 # second by default; uses my script `waveout`
1978 ready() {
1979     local f='0.5 * sin(350*tau*t) + 0.5 * sin(450*tau*t)'
1980     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
1981 }
1982 
1983 # reflow/trim lines of prose (text) to improve its legibility: it's especially
1984 # useful when the text is pasted from web-pages being viewed in reader mode
1985 reprose() {
1986     local w="${1:-80}"
1987     [ $# -gt 0 ] && shift
1988     awk 'FNR == 1 && NR > 1 { print "" } { print; fflush() }' "$@" |
1989         fold -s -w "$w" | sed -u -E 's- *\r?$--'
1990 }
1991 
1992 # ignore ansi styles from stdin and restyle things using the style-name given;
1993 # uses my script `style`
1994 restyle() { style "$@"; }
1995 
1996 # change the tab-title on your terminal app
1997 retitle() { printf "\e]0;%s\a\n" "$*"; }
1998 
1999 # REVerse-order SIZE (byte-count)
2000 revsize() { wc -c "$@" | sort -rn; }
2001 
2002 # Run In Folder
2003 rif() {
2004     local code
2005     pushd "${1:-.}" > /dev/null || return
2006     [ $# -gt 0 ] && shift
2007     "$@"
2008     code=$?
2009     popd > /dev/null || return "${code}"
2010     return "${code}"
2011 }
2012 
2013 # play a ringtone-style sound lasting the number of seconds given, or for 1
2014 # second by default; uses my script `waveout`
2015 ringtone() {
2016     local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))'
2017     waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
2018 }
2019 
2020 # Read-Only Micro (text editor)
2021 rom() { micro -readonly true "$@"; }
2022 
2023 # run the command given, trying to turn its output into TSV (tab-separated
2024 # values); uses my script `dejson`
2025 rtab() { jc "$@" | dejson; }
2026 
2027 # Right TRIM ignores trailing spaces, as well as trailing carriage returns
2028 rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2029 
2030 # show a RULER-like width-measuring line
2031 ruler() {
2032     [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \
2033         's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
2034 }
2035 
2036 # run the command given, trying to turn its output into TSV (tab-separated
2037 # values); uses my script `dejson`
2038 runtab() { jc "$@" | dejson; }
2039 
2040 # run the command given, trying to turn its output into TSV (tab-separated
2041 # values); uses my script `dejson`
2042 runtsv() { jc "$@" | dejson; }
2043 
2044 # Reverse-order WC
2045 rwc() { wc "$@" | sort -rn; }
2046 
2047 # extended-mode Sed, enabling its full regex syntax
2048 # s() { sed -E -u "$@"; }
2049 
2050 # Silent CURL spares you the progress bar, but still tells you about errors
2051 scurl() { curl --show-error -s "$@"; }
2052 
2053 # show a unique-looking SEParator line; useful to run between commands
2054 # which output walls of text
2055 sep() {
2056     [ "${1:-80}" -gt 0 ] &&
2057         printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g'
2058 }
2059 
2060 # webSERVE files in a folder as localhost, using the port number given, or
2061 # port 8080 by default
2062 serve() {
2063     printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
2064     python3 -m http.server "${1:-8080}" -d "${2:-.}"
2065 }
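# for example, serve a (hypothetical) ~/public folder on localhost port 9000,
# until quit with ctrl-c
#   serve 9000 ~/public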
2066 
2067 # SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input
2068 setdiff() {
2069     # comm -23 <(sort "$1") <(sort "$2")
2070     # dash doesn't support the process-sub syntax
2071     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2072 }
2073 
2074 # SET INtersection, sorts its 2 inputs, then finds common lines
2075 setin() {
2076     # comm -12 <(sort "$1") <(sort "$2")
2077     # dash doesn't support the process-sub syntax
2078     (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2079 }
2080 
2081 # SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input
2082 setsub() {
2083     # comm -23 <(sort "$1") <(sort "$2")
2084     # dash doesn't support the process-sub syntax
2085     (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
2086 }
2087 
2088 # Show Files (and folders), coloring folders and links; uses my script `nn`
2089 sf() {
2090     ls -al --file-type --color=never --time-style iso "$@" | awk '
2091         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2092         {
2093             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2094             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2095             printf "%6d  %s\n", NR - 1, $0; fflush()
2096         }
2097     ' | nn --gray | less -JMKiCRS
2098 }
2099 
2100 # Show Files (and folders) Plus, by coloring folders, links, and extensions;
2101 # uses my scripts `nn` and `cext`
2102 sfp() {
2103     ls -al --file-type --color=never --time-style iso "$@" | awk '
2104         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2105         {
2106             gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
2107             gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
2108             printf "%6d  %s\n", NR - 1, $0; fflush()
2109         }
2110     ' | nn --gray | cext | less -JMKiCRS
2111 }
2112 
2113 # Show File Sizes, using my scripts `nn` and `cext`
2114 sfs() {
2115     # turn arg-list into single-item lines
2116     printf "%s\n" "$@" |
2117     # calculate file-sizes, and reverse-sort results
2118     xargs -d '\n' wc -c | sort -rn |
2119     # add/realign fields to improve legibility
2120     awk '
2121         # start output with a header-like line, and add a MiB field
2122         BEGIN { printf "%6s  %10s  %8s  name\n", "n", "bytes", "MiB"; fflush() }
2123         # make table breathe with empty lines, so tall outputs are readable
2124         (NR - 1) % 5 == 1 && NR > 1 { print "" }
2125         # emit regular output lines
2126         {
2127             printf "%6d  %10d  %8.2f  ", NR - 1, $1, $1 / 1048576
2128             # first field is likely space-padded
2129             gsub(/^ */, "")
2130             # slice line after the first field, as filepaths can have spaces
2131             $0 = substr($0, length($1) + 1)
2132             # first field is likely space-padded
2133             gsub(/^ /, "")
2134             printf "%s\n", $0; fflush()
2135         }
2136     ' |
2137     # make zeros in the MiB field stand out with a special color
2138     awk '
2139         {
2140             gsub(/ 00*\.00* /, "\x1b[38;2;135;135;175m&\x1b[0m")
2141             print; fflush()
2142         }
2143     ' |
2144     # make numbers nice, alternating styles along 3-digit groups
2145     nn --gray |
2146     # color-code file extensions
2147     cext |
2148     # make result interactively browsable
2149     less -JMKiCRS
2150 }
2151 
2152 # SHell-run AWK output
2153 # shawk() { stdbuf -oL awk "$@" | sh; }
2154 
2155 # time-run the various tools given one-per-line on stdin, appending to each
2156 # the common extra arguments given explicitly; uses `hyperfine`
2157 showdown() {
2158     awk '
2159         BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } }
2160         {
2161             printf "%s", $0
2162             for (i = 1; i < ARGC; i++) printf " %s", a[i]
2163             printf "\n"; fflush()
2164         }
2165     ' "$@" | xargs -d '\n' hyperfine --style full
2166 }
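# for example, compare 2 line-counting commands on a (hypothetical) file
#   printf 'wc -l\ngrep -c ""\n' | showdown notes.txt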
2167 
2168 # SHOW a command, then RUN it
2169 showrun() { printf "\e[7m%s\e[0m\n" "$*" && "$@"; }
2170 
2171 # clean the screen, after running the command given
2172 sideshow() { tput smcup; "$@"; tput rmcup; }
2173 
2174 # skip the first n lines, or the 1st line by default
2175 skip() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2176 
2177 # skip the first n bytes
2178 skipbytes() { tail -c +$(("$1" + 1)) "${2:--}"; }
2179 
2180 # skip the last n lines, or the last line by default
2181 skiplast() { head -n -"${1:-1}" "${2:--}"; }
2182 
2183 # skip the last n bytes
2184 skiplastbytes() { head -c -"$1" "${2:--}"; }
2185 
2186 # skip the last n lines, or the last line by default
2187 skiplastlines() { head -n -"${1:-1}" "${2:--}"; }
2188 
2189 # skip the first n lines, or the 1st line by default
2190 skiplines() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }
2191 
2192 # SLOW/delay lines from the standard-input, waiting the number of seconds
2193 # given for each line, or waiting 1 second by default
2194 slow() {
2195     local seconds="${1:-1}"
2196     (
2197         IFS="$(printf "\n")"
2198         while read -r line; do
2199             sleep "${seconds}"
2200             printf "%s\n" "${line}"
2201         done
2202     )
2203 }
2204 
2205 # Show Latest Podcasts, using my scripts `podfeed` and `si`
2206 slp() {
2207     local title
2208     title="Latest Podcast Episodes as of $(date +'%F %T')"
2209     podfeed -title "${title}" "$@" | si
2210 }
2211 
2212 # recursively find all files with fewer bytes than the number given
2213 smallfiles() {
2214     local n
2215     n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
2216     [ $# -gt 0 ] && shift
2217 
2218     local arg
2219     for arg in "${@:-.}"; do
2220         if [ ! -d "${arg}" ]; then
2221             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2222             return 1
2223         fi
2224         stdbuf -oL find "${arg}" -type f -size -"$n"c
2225     done
2226 }
2227 
2228 # emit the first line as is, sorting all lines after that, using the
2229 # `sort` command, passing all/any arguments/options to it
2230 sortrest() {
2231     awk -v sort="sort $*" '
2232         { gsub(/\r$/, "") }
2233         NR == 1 { print; fflush() }
2234         NR > 1 { print | sort }
2235     '
2236 }
2237 
2238 # SORt Tab-Separated Values: emit the first line as is, sorting all lines after
2239 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2240 # all/any arguments/options to it
2241 sortsv() {
2242     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2243         { gsub(/\r$/, "") }
2244         NR == 1 { print; fflush() }
2245         NR > 1 { print | sort }
2246     '
2247 }
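# for example, sort a made-up 2-column table by its 2nd column in reverse
# numeric order, keeping the header line on top
#   printf 'name\tsize\na\t10\nb\t2\n' | sortsv -rnk2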
2248 
2249 # emit a line with the number of spaces given in it
2250 spaces() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" ""; }
2251 
2252 # ignore leading spaces, trailing spaces, even runs of multiple spaces
2253 # in the middle of lines, as well as trailing carriage returns
2254 squeeze() {
2255     awk '
2256         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2257         {
2258             gsub(/^ +| *\r?$/, "")
2259             gsub(/ *\t */, "\t")
2260             gsub(/  +/, " ")
2261             print; fflush()
2262         }
2263     ' "$@"
2264 }
2265 
2266 # SQUeeze and stOMP, by ignoring leading spaces, trailing spaces, even runs
2267 # of multiple spaces in the middle of lines, as well as trailing carriage
2268 # returns, while also turning runs of empty lines into single empty lines,
2269 # and ignoring leading/trailing empty lines, effectively also `squeezing`
2270 # lines vertically
2271 squomp() {
2272     awk '
2273         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2274         /^\r?$/ { empty = 1; next }
2275         empty { if (n > 0) print ""; empty = 0 }
2276         {
2277             gsub(/^ +| *\r?$/, "")
2278             gsub(/ *\t */, "\t")
2279             gsub(/  +/, " ")
2280             print; fflush()
2281             n++
2282         }
2283     ' "$@"
2284 }
2285 
2286 # Show a command, then Run it
2287 sr() { printf "\e[7m%s\e[0m\n" "$*" && "$@"; }
2288 
2289 # turn runs of empty lines into single empty lines, effectively squeezing
2290 # paragraphs vertically, so to speak; runs of empty lines both at the start
2291 # and at the end are ignored
2292 stomp() {
2293     awk '
2294         /^\r?$/ { empty = 1; next }
2295         empty { if (n > 0) print ""; empty = 0 }
2296         { print; fflush(); n++ }
2297     ' "$@"
2298 }
2299 
2300 # STRike-thru (lines) with AWK
2301 strawk() {
2302     local cond="${1:-1}"
2303     [ $# -gt 0 ] && shift
2304     awk '
2305         { low = lower = tolower($0) }
2306         '"${cond}"' {
2307             gsub(/\x1b\[0m/, "\x1b[0m\x1b[9m")
2308             printf "\x1b[9m%s\x1b[0m\n", $0; fflush()
2309             next
2310         }
2311         { print; fflush() }
2312     ' "$@"
2313 }
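# the awk condition given picks which lines to strike-thru, and can use the
# variables `low`/`lower` to match case-insensitively; for example
#   printf 'keep\nSKIP this\n' | strawk 'lower ~ /skip/'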
2314 
2315 # Sort Tab-Separated Values: emit the first line as is, sorting all lines after
2316 # that, using the `sort` command in TSV (tab-separated values) mode, passing
2317 # all/any arguments/options to it
2318 stsv() {
2319     awk -v sort="sort -t \"$(printf '\t')\" $*" '
2320         { gsub(/\r$/, "") }
2321         NR == 1 { print; fflush() }
2322         NR > 1 { print | sort }
2323     '
2324 }
2325 
2326 # use the result of the `awk` function `substr` for each line
2327 substr() {
2328     local start="${1:-1}"
2329     local length="${2:-80}"
2330     [ $# -gt 0 ] && shift
2331     [ $# -gt 0 ] && shift
2332     awk -v start="${start}" -v len="${length}" \
2333         '{ printf "%s\n", substr($0, start, len); fflush() }' "$@"
2334 }
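# for example, keep 3 symbols starting from the 2nd one on each line
#   echo abcdef | substr 2 3    # emits bcd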
2335 
2336 # turn SUDo privileges OFF right away: arguments also cause `sudo` to run with
2337 # what's given, before relinquishing existing privileges
2338 # sudoff() {
2339 #     local code=0
2340 #     if [ $# -gt 0 ]; then
2341 #         sudo "$@"
2342 #         code=$?
2343 #     fi
2344 #     sudo -k
2345 #     return "${code}"
2346 # }
2347 
2348 # append a final Tab-Separated-Values line with the sums of all columns from
2349 # the input table(s) given; items from first (header) lines aren't summed
2350 sumtsv() {
2351     awk -F "\t" '
2352         {
2353             print; fflush()
2354             if (width < NF) width = NF
2355         }
2356 
2357         FNR > 1 { for (i = 1; i <= NF; i++) sums[i] += $i + 0 }
2358 
2359         END {
2360             for (i = 1; i <= width; i++) {
2361                 if (i > 1) printf "\t"
2362                 printf "%s", sums[i] ""
2363             }
2364             if (width > 0) printf "\n"
2365         }
2366     ' "$@"
2367 }
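# for example, with a made-up 2-column TSV table starting with a header line
#   printf 'x\ty\n1\t2\n3\t4\n' | sumtsv    # appends the line 4<TAB>6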
2368 
2369 # show a random command defined in `clam`, using `wat` from `clam` itself
2370 surprise() {
2371     wat "$(grep -E '^[a-z]+\(' "$(which clam)" | shuf -n 1 | sed -E 's-\(.*--')"
2372 }
2373 
2374 # Time the command given
2375 t() { time "$@"; }
2376 
2377 # show a reverse-sorted tally of all lines read, where ties are sorted
2378 # alphabetically
2379 tally() {
2380     awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
2381         # reassure users by instantly showing the header
2382         BEGIN { print "value\ttally"; fflush() }
2383         { gsub(/\r$/, ""); t[$0]++ }
2384         END { for (k in t) { printf("%s\t%d\n", k, t[k]) | sort } }
2385     ' "$@"
2386 }
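# for example, tally a few repeated lines
#   printf 'a\nb\na\n' | tally    # emits a<TAB>2, then b<TAB>1, after the header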
2387 
2388 # Tab AWK: TSV-specific I/O settings for `awk`
2389 # tawk() { awk -F "\t" -v OFS="\t" "$@"; }
2390 
2391 # Tab AWK: TSV-specific I/O settings for `awk`
2392 tawk() { stdbuf -oL awk -F "\t" -v OFS="\t" "$@"; }
2393 
2394 # quick alias for my script `tbp`
2395 tb() { tbp "$@"; }
2396 
2397 # Title ECHO changes the tab-title on your terminal app
2398 techo() { printf "\e]0;%s\a\n" "$*"; }
2399 
2400 # simulate the cadence of old-fashioned teletype machines, by slowing down
2401 # the output of ASCII/UTF-8 symbols from the standard-input
2402 teletype() {
2403     awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | (
2404         IFS="$(printf "\n")"
2405         while read -r line; do
2406             echo "${line}" | sed -E 's-(.)-\1\n-g' |
2407                 while read -r item; do
2408                     sleep 0.01
2409                     printf "%s" "${item}"
2410                 done
2411             sleep 0.75
2412             printf "\n"
2413         done
2414     )
2415 }
2416 
2417 # run `top` without showing any of its output after quitting it
2418 tip() { tput smcup; top "$@"; tput rmcup; }
2419 
2420 # change the tab-title on your terminal app
2421 title() { printf "\e]0;%s\a\n" "$*"; }
2422 
2423 # quick alias for my script `tjp`
2424 tj() { tjp "$@"; }
2425 
2426 # quick alias for my script `tlp`
2427 tl() { tlp "$@"; }
2428 
2429 # show the current date in a specific format
2430 today() { date +'%Y-%m-%d %a %b %d'; }
2431 
2432 # get the first n lines, or 1 by default
2433 toline() { head -n "${1:-1}" "${2:--}"; }
2434 
2435 # lowercase all ASCII symbols
2436 tolower() { awk '{ print tolower($0); fflush() }' "$@"; }
2437 
2438 # play a tone/sine-wave sound lasting the number of seconds given, or for 1
2439 # second by default: after the optional duration, the next optional arguments
2440 # are the volume and the tone-frequency; uses my script `waveout`
2441 tone() {
2442     waveout "${1:-1}" "${2:-1} * sin(${3:-440} * 2 * pi * t)" |
2443         mpv --really-quiet -
2444 }
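# for example, play a 2-second, half-volume, 220 Hz (A3) tone
#   tone 2 0.5 220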
2445 
2446 # get the processes currently using the most cpu
2447 topcpu() {
2448     local n="${1:-10}"
2449     [ "$n" -gt 0 ] && ps aux | awk '
2450         NR == 1 { print; fflush() }
2451         NR > 1 { print | "sort -rnk3" }
2452     ' | head -n "$(("$n" + 1))"
2453 }
2454 
2455 # show all files directly in the folder given, without looking any deeper
2456 topfiles() {
2457     local arg
2458     for arg in "${@:-.}"; do
2459         if [ ! -d "${arg}" ]; then
2460             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2461             return 1
2462         fi
2463         stdbuf -oL find "${arg}" -maxdepth 1 -type f
2464     done
2465 }
2466 
2467 # show all folders directly in the folder given, without looking any deeper
2468 topfolders() {
2469     local arg
2470     for arg in "${@:-.}"; do
2471         if [ ! -d "${arg}" ]; then
2472             printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
2473             return 1
2474         fi
2475         stdbuf -oL find "${arg}" -maxdepth 1 -type d |
2476             awk '!/^\.$/ { print; fflush() }'
2477     done
2478 }
2479 
2480 # get the processes currently using the most memory
2481 topmemory() {
2482     local n="${1:-10}"
2483     [ "$n" -gt 0 ] && ps aux | awk '
2484         NR == 1 { print; fflush() }
2485         NR > 1 { print | "sort -rnk6" }
2486     ' | head -n "$(("$n" + 1))"
2487 }
2488 
2489 # transpose (switch) rows and columns from tables
2490 transpose() {
2491     awk '
2492         { gsub(/\r$/, "") }
2493 
2494         NR == 1 && /\t/ { FS = "\t"; $0 = $0 }
2495 
2496         {
2497             for (i = 1; i <= NF; i++) lines[i][NR] = $i
2498             if (maxitems < NF) maxitems = NF
2499         }
2500 
2501         END {
2502             for (j = 1; j <= maxitems; j++) {
2503                 for (i = 1; i <= NR; i++) {
2504                     if (i > 1) printf "\t"
2505                     printf "%s", lines[j][i]
2506                 }
2507                 printf "\n"
2508             }
2509         }
2510     ' "$@"
2511 }
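# for example, a made-up 2x3 TSV table becomes a 3x2 one
#   printf '1\t2\t3\n4\t5\t6\n' | transpose    # emits 1<TAB>4, 2<TAB>5, 3<TAB>6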
2512 
2513 # ignore leading/trailing spaces, as well as trailing carriage returns
2514 trim() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2515 
2516 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2517 # decimal dots themselves, when decimals in a number are all zeros; works
2518 # on gawk and busybox awk, but not on mawk, as the latter lacks `gensub`
2519 # trimdecs() {
2520 #     awk '
2521 #         {
2522 #             $0 = gensub(/([0-9]+)\.0+/, "\\1", "g")
2523 #             $0 = gensub(/([0-9]+\.[0-9]*[1-9]+)0+/, "\\1", "g")
2524 #             print; fflush()
2525 #         }
2526 #     ' "$@"
2527 # }
2528 
2529 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
2530 # decimal dots themselves, when decimals in a number are all zeros
2531 trimdecs() {
2532     awk '{ print; fflush() }' "$@" |
2533         sed -u -E 's-([0-9]+)\.0+-\1-g; s-([0-9]+\.[0-9]*[1-9]+)0+-\1-g'
2534 }
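# for example
#   echo '1.250 3.00 7' | trimdecs    # emits 1.25 3 7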
2535 
2536 # ignore trailing spaces, as well as trailing carriage returns
2537 trimend() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2538 
2539 # ignore trailing spaces, as well as trailing carriage returns
2540 trimends() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2541 
2542 # ignore leading/trailing spaces, as well as trailing carriage returns
2543 trimlines() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2544 
2545 # ignore leading/trailing spaces, as well as trailing carriage returns
2546 trimsides() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }
2547 
2548 # ignore trailing spaces, as well as trailing carriage returns
2549 trimtrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2550 
2551 # ignore trailing spaces, as well as trailing carriage returns
2552 trimtrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }
2553 
2554 # try running a command, emitting an explicit message to standard-error
2555 # if the command given fails
2556 try() {
2557     "$@" || {
2558         printf "\n\e[31m%s \e[41m\e[97m failed \e[0m\n" "$*" >&2
2559         return 255
2560     }
2561 }
2562 
2563 # Transform Strings with Python; uses my script `tbp`
2564 tsp() { tbp -s "$@"; }
2565 
2566 # run the command given, trying to turn its output into TSV (tab-separated
2567 # values); uses my script `dejson`
2568 tsvrun() { jc "$@" | dejson; }
2569 
2570 # deduplicate lines, keeping them in their original order
2571 unique() { awk '!c[$0]++ { print; fflush() }' "$@"; }
2572 
2573 # concatenate all named input sources unix-style: all trailing CRLFs become
2574 # single LFs, and each non-empty input will always end in a LF, so lines from
2575 # different sources aren't accidentally joined; also leading UTF-8 BOMs on the
2576 # first line of each input are ignored, as those are useless at best
2577 unixify() {
2578     awk '
2579         FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
2580         { gsub(/\r$/, ""); print; fflush() }
2581     ' "$@"
2582 }
2583 
2584 # go UP n folders, or go up 1 folder by default
2585 up() {
2586     if [ "${1:-1}" -le 0 ]; then
2587         cd .
2588         return $?
2589     fi
2590 
2591     cd "$(printf "%${1:-1}s" "" | sed 's- -../-g')" || return $?
2592 }
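# for example
#   up 3    # same as cd ../../../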
2593 
2594 # convert United States Dollars into CAnadian Dollars, using the latest
2595 # official exchange rates from the bank of canada; during weekends, the
2596 # latest rate may be from a few days ago; the default amount of usd to
2597 # convert is 1, when not given
2598 usd2cad() {
2599     local site='https://www.bankofcanada.ca/valet/observations/group'
2600     local csv_rates="${site}/FX_RATES_DAILY/csv"
2601     local url
2602     url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
2603     curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
2604         /USD/ { for (i = 1; i <= NF; i++) if($i ~ /USD/) j = i }
2605         END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
2606 }
2607 
2608 # View with `less`
2609 v() { less -JMKiCRS "$@"; }
2610 
2611 # run a command, showing its success/failure right after
2612 verdict() {
2613     local code
2614     "$@"
2615     code=$?
2616 
2617     if [ "${code}" -eq 0 ]; then
2618         printf "\n\e[38;2;0;135;95m%s \e[48;2;0;135;95m\e[38;2;255;255;255m succeeded \e[0m\n" "$*" >&2
2619     else
2620         printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" >&2
2621     fi
2622     return "${code}"
2623 }
2624 
2625 # run `cppcheck` with even stricter options
2626 vetc() { cppcheck --enable=portability --enable=style "$@"; }
2627 
2628 # run `cppcheck` with even stricter options
2629 vetcpp() { cppcheck --enable=portability --enable=style "$@"; }
2630 
2631 # check shell scripts for common gotchas, avoiding complaints about using
2632 # the `local` keyword, which is widely supported in practice
2633 vetshell() { shellcheck -e 3043 "$@"; }
2634 
2635 # View with Header runs `less` without line numbers, with ANSI styles, no
2636 # line-wraps, and using the first n lines as a sticky-header (1 by default),
2637 # so they always show on top
2638 vh() {
2639     local n="${1:-1}"
2640     [ $# -gt 0 ] && shift
2641     less --header="$n" -JMKiCRS "$@"
2642 }
2643 
2644 # View Nice Columns; uses my scripts `realign` and `nn`
2645 vnc() { realign "$@" | nn --gray | less -JMKiCRS; }
2646 
2647 # View Nice Hexadecimals; uses my script `nh`
2648 vnh() { nh "$@" | less -JMKiCRS; }
2649 
2650 # View Nice Json / Very Nice Json; uses my scripts `nj` and `nn`
2651 vnj() { nj "$@" | less -JMKiCRS; }
2652 
2653 # View Very Nice Json with Nice Numbers; uses my scripts `nj` and `nn`
2654 vnjnn() { nj "$@" | nn --gray | less -JMKiCRS; }
2655 
2656 # View Nice Numbers; uses my script `nn`
2657 vnn() { nn "${@:---gray}" | less -JMKiCRS; }
2658 
2659 # View Nice Table / Very Nice Table; uses my scripts `nt` and `nn`
2660 vnt() {
2661     awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" |
2662         nt | nn --gray |
2663         awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' |
2664         less -JMKiCRS #--header=1
2665 }
2666 
2667 # View Text with `less`
2668 # vt() { less -JMKiCRS "$@"; }
2669 
2670 # What are these (?); uses my command `nwat`
2671 # w() { nwat "$@"; }
2672 
2673 # What Are These (?) shows what the names given to it are/do
2674 wat() {
2675     local a
2676 
2677     if [ $# -eq 0 ]; then
2678         printf "\e[31mwat: no names given\e[0m\n" > /dev/stderr
2679         return 1
2680     fi
2681 
2682     for a in "$@"; do
2683         # printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
2684         printf "\e[7m%-80s\e[0m\n" "$a"
2685 
2686         # resolve 1 alias level
2687         if alias "$a" 2> /dev/null > /dev/null; then
2688             a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
2689         fi
2690 
2691         if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
2692             # resolved aliases with args/spaces in them would otherwise fail
2693             echo "$a"
2694         elif whence -f "$a" > /dev/null 2> /dev/null; then
2695             # zsh seems to show a shell function's code only via `whence -f`
2696             whence -f "$a"
2697         elif type "$a" > /dev/null 2> /dev/null; then
2698             # dash doesn't support `declare`, and `type` in bash emits
2699             # a redundant first output line, when it's a shell function
2700             type "$a" | awk '
2701                 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
2702                 { print; fflush() }
2703                 END { if (NR < 2 && skipped) print skipped }
2704             '
2705         else
2706             printf "\e[31m%s not found\e[0m\n" "$a"
2707         fi
2708     done | less -JMKiCRS
2709 }
2710 
2711 # Word-Count TSV, runs the `wc` app using all stats, emitting tab-separated
2712 # lines instead
2713 wctsv() {
2714     printf "file\tbytes\tlines\tcharacters\twords\tlongest\n"
2715     stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \
2716         's-^ *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^\r]*)$-\6\t\4\t\1\t\3\t\2\t\5-' |
2717         awk '
2718             NR > 1 { print prev; fflush() }
2719             { prev = $0 }
2720             END { if (NR == 1 || !/^total\t/) print }
2721         '
2722 }
2723 
2724 # get weather forecasts, almost filling the terminal's current width
2725 weather() {
2726     # ● ⬤
2727     printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
2728         curl --show-error -s telnet://graph.no:79 |
2729         sed -u -E \
2730             -e 's/ *\r?$//' \
2731             -e '/^\[/d' \
2732             -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
2733             -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
2734             -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
2735             -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
2736             -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
2737             -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \
2738             -e 's/\*/○/g'
2739 }
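# for example, using a place name of your choice
#   weather oslo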
2740 
2741 # recursively find all files with trailing spaces/CRs
2742 wheretrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2743 
2744 # recursively find all files with trailing spaces/CRs
2745 whichtrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }
2746 
2747 # run `xargs`, using whole lines as extra arguments
2748 x() { xargs -d '\n' "$@"; }
2749 
2750 # run `xargs`, using zero/null bytes as the extra-arguments terminator
2751 x0() { xargs -0 "$@"; }
2752 
2753 # run `xargs`, using whole lines as extra arguments
2754 xl() { xargs -d '\n' "$@"; }
2755 
2756 # Youtube Audio Player
2757 yap() {
2758     local url
2759     # some youtube URIs end with extra playlist/tracker parameters
2760     url="$(echo "$1" | sed 's-&.*--')"
2761     mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
2762 }
2763 
2764 # show a calendar for the current year, or for the year given
2765 year() {
2766     {
2767         # show the current date/time center-aligned
2768         printf "%22s\e[32m%s\e[0m  \e[34m%s\e[0m\n\n" \
2769             "" "$(date +'%a %b %d')" "$(date +%T)"
2770         # show a whole-year calendar
2771         cal -y "$@"
2772     } | less -JMKiCRS
2773 }
2774 
2775 # show the current date in the YYYY-MM-DD format
2776 ymd() { date +'%Y-%m-%d'; }
2777 
2778 # YouTube Url
2779 ytu() {
2780     local url
2781     # some youtube URIs end with extra playlist/tracker parameters
2782     url="$(echo "$1" | sed 's-&.*--')"
2783     [ $# -gt 0 ] && shift
2784     yt-dlp "$@" --get-url "${url}"
2785 }