#!/bin/sh

# The MIT License (MIT)
#
# Copyright © 2024 pacman64
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


# clam
#
# Command-Line Augmentation Module (clam): get the best out of your shell
#
#
# This is a collection of arguably useful shell functions and shortcuts:
# some of these extra commands can be real time/effort savers, ideally
# letting you concentrate on getting things done.
#
# Some of these commands depend on my other scripts from the `pac-tools`,
# others either rely on widely-preinstalled command-line apps, or ones
# which are available on most of the major command-line `package` managers.
#
# Among these commands, you'll notice a preference for lines whose items
# are tab-separated instead of space-separated, and unix-style lines, which
# always end with a line-feed, instead of a CRLF byte-pair. This convention
# makes plain-text data-streams less ambiguous and generally easier to work
# with, especially when passing them along pipes.
#
# To use this script, you're supposed to `source` it, so its definitions
# stay for your whole shell session: for that, you can run `source clam` or
# `. clam` (no quotes either way), either directly or at shell startup.
#
# This script is compatible with `bash`, `zsh`, and even `dash`, which is
# debian linux's default non-interactive shell. Some of its commands even
# seem to work on busybox's shell.


# handle help options
case "$1" in
    -h|--h|-help|--help)
        # show help message, using the info-comment from this very script
        awk '/^# +clam/, /^$/ { gsub(/^# ?/, ""); print }' "$0"
        exit 0
        ;;
esac


# dash doesn't support regex-matching syntax, forcing the use of case statements
case "$0" in
    -bash|-dash|-sh|bash|dash|sh)
        # script is being sourced with bash or dash, which is good
        :
        ;;
    *)
        case "$ZSH_EVAL_CONTEXT" in
            *:file)
                # script is being sourced with zsh, which is good
                :
                ;;
            *)
                # script is being run normally, which is a waste of time
                printf "\e[48;2;255;255;135m\e[30mDon't run this script, source it instead: to do that,\e[0m\n"
                printf "\e[48;2;255;255;135m\e[30mrun 'source clam' or '. clam' (no quotes either way).\e[0m\n"
                # failing during shell-startup may deny shell access, so exit
                # with a 0 error-code to declare success
                exit 0
                ;;
        esac
        ;;
esac
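
# For example, a line like the following in ~/.bashrc or ~/.zshrc would load
# these definitions at shell startup; the path is only illustrative, and
# depends on where this script is actually saved:
#
#   . ~/pac-tools/clam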

# n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
c1() { bsbs 1 "$@"; }
c2() { bsbs 2 "$@"; }
c3() { bsbs 3 "$@"; }
c4() { bsbs 4 "$@"; }
c5() { bsbs 5 "$@"; }
c6() { bsbs 6 "$@"; }
c7() { bsbs 7 "$@"; }
c8() { bsbs 8 "$@"; }
c9() { bsbs 9 "$@"; }

# n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
alias 1=c1
alias 2=c2
alias 3=c3
alias 4=c4
alias 5=c5
alias 6=c6
alias 7=c7
alias 8=c8
alias 9=c9

# n-Column-layout shortcuts, using my script `bsbs` (Book-like Side By Side)
alias 1c=c1
alias 2c=c2
alias 3c=c3
alias 4c=c4
alias 5c=c5
alias 6c=c6
alias 7c=c7
alias 8c=c8
alias 9c=c9

# Avoid/ignore lines which match any of the regexes given
alias a=avoid

# find name from the local `apt` database of installable packages
# aptfind() {
#     # despite warnings, the `apt search` command has been around for years
#     # apt search "$1" 2>/dev/null | rg -A 1 "^$1" | sed -u 's/^--$//'
#     apt search "$1" 2>/dev/null | rg -A 1 "^[a-z0-9-]*$1" | sed -u 's/^--$//'
# }

# emit each argument given as its own line of output
args() { awk 'BEGIN { for (i = 1; i < ARGC; i++) print ARGV[i]; exit }' "$@"; }

# turn UTF-8 into visible pseudo-ASCII, where variants of latin letters become
# their basic ASCII counterparts, and where non-ASCII symbols become question
# marks, one question mark for each code-point byte
asciify() { iconv -f utf-8 -t ascii//translit "$@"; }

# avoid/ignore lines which match any of the regexes given
avoid() {
    awk '
    BEGIN {
        for (i = 1; i < ARGC; i++) {
            e[i] = ARGV[i]
            delete ARGV[i]
        }
    }

    {
        for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next
        print; fflush()
        got++
    }

    END { exit(got == 0) }
    ' "${@:-^\r?$}"
}

# emit a line with a repeating ball-like symbol in it
balls() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -●-g'; }

# show an ansi-styled BANNER-like line
banner() { printf "\e[7m%s\e[0m\n" "$*"; }

# emit a colored bar which can help visually separate different outputs
bar() {
    [ "${1:-80}" -gt 0 ] &&
        printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" ""
}

# process Blocks/paragraphs of non-empty lines with AWK
# bawk() { awk -F='' -v RS='' "$@"; }

# process Blocks/paragraphs of non-empty lines with AWK
bawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }

# play a repeating and annoying high-pitched beep sound a few times a second,
# lasting the number of seconds given, or for 1 second by default; uses my
# script `waveout`
beeps() {
    local f='sin(2_000 * tau * t) * (t % 0.5 < 0.0625)'
    waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
}

# start by joining all arguments given as a tab-separated-items line of output,
# followed by all lines from stdin verbatim
begintsv() {
    awk '
    BEGIN {
        for (i = 1; i < ARGC; i++) {
            if (i > 1) printf "\t"
            printf "%s", ARGV[i]
            delete ARGV[i]
        }
        if (ARGC > 1) printf "\n"
        fflush()
    }
    { print; fflush() }
    ' "$@"
}
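
# illustrative example for `begintsv` above: the line below would prepend a
# tab-separated header to a headerless TSV stream (data.tsv is hypothetical)
#
#   cat data.tsv | begintsv name size date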

# play a repeating synthetic-bell-like sound lasting the number of seconds
# given, or for 1 second by default; uses my script `waveout`
bell() {
    local f='sin(880*tau*u) * exp(-10*u)'
    waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
}

# play a repeating sound with synthetic-bells, lasting the number of seconds
# given, or for 1 second by default; uses my script `waveout`
bells() {
    local f="sum(sin(880*tau*v)*exp(-10*v) for v in (u, (u-0.25)%1)) / 2"
    waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
}

# Breathe Header: add an empty line after the first one (the header), then
# separate groups of 5 lines (by default) with empty lines between them
bh() {
    local n="${1:-5}"
    [ $# -gt 0 ] && shift
    awk -v n="$n" '
    BEGIN { if (n == 0) n = -1 }
    (NR - 1) % n == 1 && NR > 1 { print "" }
    { print; fflush() }
    ' "$@"
}

# recursively find all files with at least the number of bytes given; when
# not given a minimum byte-count, the default is 100 binary megabytes
bigfiles() {
    local n
    n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')"
    [ $# -gt 0 ] && shift

    local arg
    for arg in "${@:-.}"; do
        if [ ! -d "${arg}" ]; then
            printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
            return 1
        fi
        stdbuf -oL find "${arg}" -type f \( -size "$n"c -o -size +"$n"c \)
    done
}
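
# illustrative example for `bigfiles` above: underscores are allowed as
# digit-separators in the byte-count, so the line below would list all
# files of 25 MB or more under the (hypothetical) folder `Downloads`
#
#   bigfiles 25_000_000 Downloads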

# Breathe Lines: separate groups of 5 lines (by default) with empty lines
bl() {
    local n="${1:-5}"
    [ $# -gt 0 ] && shift
    awk -v n="$n" '
    BEGIN { if (n == 0) n = -1 }
    NR % n == 1 && NR != 1 { print "" }
    { print; fflush() }
    ' "$@"
}

# process BLocks/paragraphs of non-empty lines with AWK
# blawk() { awk -F='' -v RS='' "$@"; }

# process BLocks/paragraphs of non-empty lines with AWK
blawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }

# emit a line with a repeating block-like symbol in it
blocks() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -█-g'; }

# Book-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
# my script `bsbs`
bman() {
    local w
    w="$(tput cols)"
    if [ "$w" -gt 90 ]; then
        w="$((w / 2 - 1))"
    fi
    MANWIDTH="$w" man "$@" | bsbs 2
}

# split lines using the regex given, turning them into single-item lines
breakdown() {
    local sep="${1:- }"
    [ $# -gt 0 ] && shift
    awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"
}

# BOOK-like MANual, lays out `man` docs as pairs of side-by-side pages; uses
# my script `bsbs`
bookman() {
    local w
    w="$(tput cols)"
    if [ "$w" -gt 90 ]; then
        w="$((w / 2 - 1))"
    fi
    MANWIDTH="$w" man "$@" | bsbs 2
}

# separate groups of 5 lines (by default) with empty lines
breathe() {
    local n="${1:-5}"
    [ $# -gt 0 ] && shift
    awk -v n="$n" '
    BEGIN { if (n == 0) n = -1 }
    NR % n == 1 && NR != 1 { print "" }
    { print; fflush() }
    ' "$@"
}

# Browse Text
bt() { less -JMKNiCRS "$@"; }

# show a reverse-sorted tally of all lines read, where ties are sorted
# alphabetically, and where trailing bullets are added to quickly make
# the tally counts comparable at a glance
bully() {
    awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
    # reassure users by instantly showing the header
    BEGIN { print "value\ttally\tbullets"; fflush() }

    { gsub(/\r$/, ""); tally[$0]++ }

    END {
        # find the max tally, which is needed to build the bullets-string
        max = 0
        for (k in tally) {
            if (max < tally[k]) max = tally[k]
        }

        # make enough bullets for all tallies: this loop makes growing the
        # string a task with complexity O(n * log n), instead of a naive
        # O(n**2), which can slow-down things when tallies are high enough
        bullets = "•"
        for (n = max; n > 1; n /= 2) {
            bullets = bullets bullets
        }

        # emit unsorted output lines to the sort cmd, which will emit the
        # final reverse-sorted tally lines
        for (k in tally) {
            s = substr(bullets, 1, tally[k])
            printf("%s\t%d\t%s\n", k, tally[k], s) | sort
        }
    }
    ' "$@"
}

# play a busy-phone-line sound lasting the number of seconds given, or for 1
# second by default; uses my script `waveout`
busy() {
    # local f='(u < 0.5) * (sin(480*tau * t) + sin(620*tau * t)) / 2'
    local f='min(1, exp(-90*(u-0.5))) * (sin(480*tau*t) + sin(620*tau*t)) / 2'
    # local f='(sin(350*tau*t) + sin(450*tau*t)) / 2 * min(1, exp(-90*(u-0.5)))'
    waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
}

# keep all BUT the FIRST (skip) n lines, or skip just the 1st line by default
butfirst() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }

# keep all BUT the LAST n lines, or skip just the last line by default
butlast() { head -n -"${1:-1}" "${2:--}"; }

# load bytes from the filenames given
bytes() { cat "$@"; }

# quick alias for `cat`
alias c=cat

# CAlculator with Nice numbers runs my script `ca` and colors results with
# my script `nn`, alternating styles to make long numbers easier to read
can() { ca "$@" | nn --gray; }

# conCATenate Lines
catl() {
    awk '
    FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
    { gsub(/\r$/, ""); print; fflush() }
    ' "$@"
}

# Csv AWK: CSV-specific input settings for `awk`
# cawk() { awk --csv "$@"; }

# Csv AWK: CSV-specific input settings for `awk`
cawk() { stdbuf -oL awk --csv "$@"; }

# Compile C Stripped
ccs() { cc -Wall -O2 -s -fanalyzer "$@"; }

# Colored Go Test on the folder given; uses my command `gbm`
cgt() { go test "${1:-.}" 2>&1 | gbm '^ok' '^[-]* ?FAIL' '^\?'; }

# ignore final line-feed from text, if it's the very last byte; also ignore
# all trailing carriage-returns
choplf() {
    awk '
    FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
    NR > 1 { print ""; fflush() }
    { gsub(/\r$/, ""); printf "%s", $0; fflush() }
    ' "$@"
}

# Color Json using the `jq` app, allowing an optional filepath as the data
# source, and even an optional transformation formula
cj() { jq -C "${2:-.}" "${1:--}"; }

# show a live digital clock
clock() { watch -n 1 echo 'Press Ctrl + C to quit this clock'; }

# Colored Live/Line-buffered RipGrep ensures results show up immediately,
# also emitting colors when piped
clrg() { rg --color=always --line-buffered "$@"; }

# CLear Screen, like the old dos command of the same name
cls() { clear; }

# COunt COndition: count how many times the AWK expression given is true
coco() {
    local cond="${1:-1}"
    [ $# -gt 0 ] && shift
    awk "
    { low = lower = tolower(\$0) }
    ${cond} { count++ }
    END { print count }
    " "$@"
}
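
# illustrative example for `coco` above: the condition is any AWK expression,
# and `low` holds the lowercased line, so the line below would count lines
# which mention `error` in any letter-casing (server.log is hypothetical)
#
#   coco 'low ~ /error/' server.log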
"${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -×-g' 430 } 431 432 # split lines using the regex given, turning them into single-item lines 433 crumble() { 434 local sep="${1:- }" 435 [ $# -gt 0 ] && shift 436 awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@" 437 } 438 439 # turn Comma-Separated-Values tables into Tab-Separated-Values tables 440 csv2tsv() { xsv fmt -t '\t' "$@"; } 441 442 # Change Units turns common US units into international ones; uses my 443 # scripts `bu` (Better Units) and `nn` (Nice Numbers) 444 cu() { 445 bu "$@" | awk ' 446 NF == 5 || (NF == 4 && $NF == "s") { print $(NF-1), $NF } 447 NF == 4 && $NF != "s" { print $NF } 448 ' | nn --gray 449 } 450 451 # CURL Silent spares you the progress bar, but still tells you about errors 452 curls() { curl --show-error -s "$@"; } 453 454 # Count (condition) With AWK: count the times the AWK expression given is true 455 cwawk() { 456 local cond="${1:-1}" 457 [ $# -gt 0 ] && shift 458 awk " 459 { low = lower = tolower(\$0) } 460 ${cond} { count++ } 461 END { print count } 462 " "$@" 463 } 464 465 # emit a line with a repeating dash-like symbol in it 466 dashes() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -—-g'; } 467 468 # DEcode BASE64-encoded data, or even base64-encoded data-URIs, by ignoring 469 # the leading data-URI declaration, if present 470 debase64() { sed -E 's-^data:.{0,50};base64,--' "${1:--}" | base64 -d; } 471 472 # DECAPitate (lines) emits the first line as is, piping all lines after that 473 # to the command given, passing all/any arguments/options to it 474 # decap() { 475 # awk -v cmd="$*" 'NR == 1 { print; fflush() } NR > 1 { print | cmd }' 476 # } 477 478 # turn Comma-Separated-Values tables into tab-separated-values tables 479 # decsv() { xsv fmt -t '\t' "$@"; } 480 481 # DEDUPlicate prevents lines from appearing more than once 482 dedup() { awk '!c[$0]++ { print; fflush() }' "$@"; } 483 484 # dictionary-define the word given, using an online service 485 define() { 486 local arg 487 local gap=0 488 for arg in "$@"; do 489 [ "${gap}" -gt 0 ] && printf "\n" 490 gap=1 491 printf "\x1b[7m%-80s\x1b[0m\n" "${arg}" 492 curl -s "dict://dict.org/d:${arg}" | awk ' 493 { gsub(/\r$/, "") } 494 /^151 / { 495 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush() 496 next 497 } 498 /^[1-9][0-9]{2} / { 499 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush() 500 next 501 } 502 { print; fflush() } 503 ' 504 done | less -JMKiCRS 505 } 506 507 # DEcompress GZip-encoded data 508 # degz() { zcat "$@"; } 509 510 # turn JSON Lines into a proper json array 511 dejsonl() { jq -s -M "${@:-.}"; } 512 513 # delay lines from the standard-input, waiting the number of seconds given 514 # for each line, or waiting 1 second by default 515 # delay() { 516 # local seconds="${1:-1}" 517 # ( 518 # IFS="$(printf "\n")" 519 # while read -r line; do 520 # sleep "${seconds}" 521 # printf "%s\n" "${line}" 522 # done 523 # ) 524 # } 525 526 # expand tabs each into up to the number of space given, or 4 by default 527 detab() { expand -t "${1:-4}"; } 528 529 # ignore trailing spaces, as well as trailing carriage returns 530 detrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; } 531 532 # turn UTF-16 data into UTF-8 533 deutf16() { iconv -f utf16 -t utf8 "$@"; } 534 535 # DIVide 2 numbers 3 ways, including the complement 536 div() { 537 awk -v a="${1:-1}" -v b="${2:-1}" ' 538 BEGIN { 539 gsub(/_/, "", a) 540 gsub(/_/, "", b) 541 if (a > b) { c = a; a = b; b = c } 542 c = 1 - a / b 543 

# DIVide 2 numbers 3 ways, including the complement
div() {
    awk -v a="${1:-1}" -v b="${2:-1}" '
    BEGIN {
        gsub(/_/, "", a)
        gsub(/_/, "", b)
        if (a > b) { c = a; a = b; b = c }
        c = 1 - a / b
        if (0 <= c && c <= 1) printf "%f\n%f\n%f\n", a / b, b / a, c
        else printf "%f\n%f\n", a / b, b / a
        exit
    }'
}

# get/fetch data from the filename or URI given; named `dog` because dogs can
# `fetch` things for you
# dog() {
#     if [ $# -gt 1 ]; then
#         printf "\e[31mdogs only have 1 mouth to fetch with\e[0m\n" >&2
#         return 1
#     fi
#
#     if [ -e "$1" ]; then
#         cat "$1"
#         return $?
#     fi
#
#     case "${1:--}" in
#         -) cat -;;
#         file://*|https://*|http://*) curl --show-error -s "$1";;
#         ftp://*|ftps://*|sftp://*) curl --show-error -s "$1";;
#         dict://*|telnet://*) curl --show-error -s "$1";;
#         data:*) echo "$1" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;;
#         *) curl --show-error -s "https://$1";;
#     esac 2> /dev/null || {
#         printf "\e[31mcan't fetch %s\e[0m\n" "${1:--}" >&2
#         return 1
#     }
# }

# emit a line with a repeating dot-like symbol in it
dots() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -·-g'; }

# ignore/remove all matched regexes given on all stdin lines
drop() {
    awk '
    BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } }
    {
        for (i = 1; i < ARGC; i++) gsub(e[i], "")
        print; fflush()
    }
    ' "${@:-\r$}"
}

# show the current Date and Time
dt() {
    printf "\e[32m%s\e[0m \e[34m%s\e[0m\n" "$(date +'%a %b %d')" "$(date +%T)"
}

# show the current Date, Time, and a Calendar with the 3 `current` months
dtc() {
    # show the current date/time center-aligned
    printf "%22s\e[32m%s\e[0m \e[34m%s\e[0m\n\n" \
        "" "$(date +'%a %b %d')" "$(date +%T)"
    # debian linux has a different `cal` app which highlights the day
    if [ -e "/usr/bin/ncal" ]; then
        ncal -C -3
    else
        cal -3
    fi
}

# quick alias for `echo`
alias e=echo

# Evaluate Awk expression
ea() {
    local expr="${1:-0}"
    [ $# -gt 0 ] && shift
    awk "BEGIN { print ${expr}; exit }" "$@"
}

# Extended-mode Grep, enabling its full regex syntax
eg() { grep -E --line-buffered "$@"; }
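
# illustrative example for `ea` above: the expression runs in AWK's BEGIN
# block, so the line below simply prints 37.5
#
#   ea '(12 + 3) * 2.5'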
-d "${arg}" ]; then 628 printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr 629 return 1 630 fi 631 stdbuf -oL find "${arg}" -type f -size 0c 632 done 633 } 634 635 # Evaluate Nodejs expression 636 # en() { 637 # local expr="${1:-null}" 638 # expr="$(echo "${expr}" | sed 's-\\-\\\\-g; s-`-\`-g')" 639 # node -e "console.log(${expr})" | sed -u 's-\x1b\[[^A-Za-z]+[A-Za-z]--g' 640 # } 641 642 # Evaluate Python expression 643 ep() { python -c "print(${1:-None})"; } 644 645 # Extended Plain Interactive Grep 646 epig() { ugrep --color=never -Q -E "$@"; } 647 648 # Extended Plain Recursive Interactive Grep 649 eprig() { ugrep --color=never -Q -E "$@"; } 650 651 # Evaluate Ruby expression 652 er() { ruby -e "puts ${1:-nil}"; } 653 654 # ignore/remove all matched regexes given on all stdin lines 655 erase() { 656 awk ' 657 BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } } 658 { 659 for (i = 1; i < ARGC; i++) gsub(e[i], "") 660 print; fflush() 661 } 662 ' "${@:-\r$}" 663 } 664 665 # Editor Read-Only 666 ero() { micro -readonly true "$@"; } 667 668 # Extended-mode Sed, enabling its full regex syntax 669 es() { sed -E -u "$@"; } 670 671 # convert EURos into CAnadian Dollars, using the latest official exchange 672 # rates from the bank of canada; during weekends, the latest rate may be 673 # from a few days ago; the default amount of euros to convert is 1, when 674 # not given 675 eur2cad() { 676 local site='https://www.bankofcanada.ca/valet/observations/group' 677 local csv_rates="${site}/FX_RATES_DAILY/csv" 678 local url 679 url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')" 680 curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" ' 681 /EUR/ { for (i = 1; i <= NF; i++) if($i ~ /EUR/) j = i } 682 END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }' 683 } 684 685 # EValuate AWK expression 686 evawk() { 687 local expr="${1:-0}" 688 [ $# -gt 0 ] && shift 689 awk "BEGIN { print ${expr}; exit }" "$@" 690 } 691 692 # convert fahrenheit into celsius 693 fahrenheit() { 694 echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | 695 awk '/./ { printf "%.2f\n", ($0 - 32) * 5.0/9.0 }' 696 } 697 698 # Flushed AWK 699 fawk() { stdbuf -oL awk "$@"; } 700 701 # fetch/web-request all URIs given, using protcol HTTPS when none is given 702 fetch() { 703 local a 704 for a in "$@"; do 705 case "$a" in 706 file://*|https://*|http://*) curl --show-error -s "$a";; 707 ftp://*|ftps://*|sftp://*) curl --show-error -s "$a";; 708 dict://*|telnet://*) curl --show-error -s "$a";; 709 data:*) echo "$a" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;; 710 *) curl --show-error -s "https://$a";; 711 esac 712 done 713 } 714 715 # run the Fuzzy Finder (fzf) in multi-choice mode, with custom keybindings 716 ff() { fzf -m --bind ctrl-a:select-all,ctrl-space:toggle "$@"; } 717 718 # show all files in a folder, digging recursively 719 files() { 720 local arg 721 for arg in "${@:-.}"; do 722 if [ ! -d "${arg}" ]; then 723 printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr 724 return 1 725 fi 726 stdbuf -oL find "${arg}" -type f 727 done 728 } 729 730 # recursively find all files with fewer bytes than the number given 731 filesunder() { 732 local n 733 n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')" 734 [ $# -gt 0 ] && shift 735 736 local arg 737 for arg in "${@:-.}"; do 738 if [ ! 
-d "${arg}" ]; then 739 printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr 740 return 1 741 fi 742 stdbuf -oL find "${arg}" -type f -size -"$n"c 743 done 744 } 745 746 # get the first n lines, or 1 by default 747 first() { head -n "${1:-1}" "${2:--}"; } 748 749 # limit data up to the first n bytes 750 firstbytes() { head -c "$1" "${2:--}"; } 751 752 # get the first n lines, or 1 by default 753 firstlines() { head -n "${1:-1}" "${2:--}"; } 754 755 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's 756 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds, 757 # and ensuring each input's last line ends with a line-feed 758 fixlines() { 759 awk ' 760 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 761 { gsub(/\r$/, ""); print; fflush() } 762 ' "$@" 763 } 764 765 # FLushed AWK 766 # flawk() { stdbuf -oL awk "$@"; } 767 768 # First Line AWK, emits the first line as is, and uses the rest of the args 769 # given by injecting the first into the script, and passing all later args as 770 # later args to `awk` as given 771 flawk() { 772 local code="${1:-1}" 773 [ $# -gt 0 ] && shift 774 stdbuf -oL awk "NR == 1 { print; fflush(); next } ${code}" "$@" 775 } 776 777 # Faint LEAK emits/tees input both to stdout and stderr, coloring gray what 778 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes 779 # involving several steps 780 fleak() { 781 awk ' 782 { 783 gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") 784 printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0 > "/dev/stderr" 785 print; fflush() 786 } 787 ' "$@" 788 } 789 790 # try to run the command given using line-buffering for its (standard) output 791 flushlines() { stdbuf -oL "$@"; } 792 793 # show all folders in a folder, digging recursively 794 folders() { 795 local arg 796 for arg in "${@:-.}"; do 797 if [ ! 
-d "${arg}" ]; then 798 printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr 799 return 1 800 fi 801 stdbuf -oL find "${arg}" -type d | awk '!/^\.$/ { print; fflush() }' 802 done 803 } 804 805 # start from the line number given, skipping all previous ones 806 fromline() { tail -n +"${1:-1}" "${2:--}"; } 807 808 # convert FeeT into meters 809 ft() { 810 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 811 awk '/./ { printf "%.2f\n", 0.3048 * $0; fflush() }' 812 } 813 814 # convert FeeT² (squared) into meters² 815 ft2() { 816 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 817 awk '/./ { printf "%.2f\n", 0.09290304 * $0 }' 818 } 819 820 # Get/fetch data from the filenames/URIs given; uses my script `get` 821 # alias g=get 822 823 # run `grep` in extended-regex mode, enabling its full regex syntax 824 # g() { grep -E --line-buffered "$@"; } 825 826 # convert GALlons into liters 827 gal() { 828 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 829 awk '/./ { printf "%.2f\n", 3.785411784 * $0; fflush() }' 830 } 831 832 # convert binary GigaBytes into bytes 833 gb() { 834 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 835 awk '/./ { printf "%.4f\n", 1073741824 * $0; fflush() }' | 836 sed 's-\.00*$--' 837 } 838 839 # Good, Bad, Meh colors lines using up to 3 regular expressions 840 gbm() { 841 local good="$1" 842 local bad="$2" 843 local meh="$3" 844 [ $# -gt 0 ] && shift 845 [ $# -gt 0 ] && shift 846 [ $# -gt 0 ] && shift 847 848 awk ' 849 BEGIN { 850 gotgood = ARGC > 1 && ARGV[1] != "" 851 gotbad = ARGC > 2 && ARGV[2] != "" 852 gotmeh = ARGC > 3 && ARGV[3] != "" 853 good = ARGV[1] 854 bad = ARGV[2] 855 meh = ARGV[3] 856 delete ARGV[1] 857 delete ARGV[2] 858 delete ARGV[3] 859 } 860 861 gotgood && $0 ~ good { 862 # code to use a color-blind-friendlier blue, instead of green 863 # gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;0;95;215m") 864 # printf "\x1b[38;2;0;95;215m%s\x1b[0m\n", $0 865 gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;0;135;95m") 866 printf "\x1b[38;2;0;135;95m%s\x1b[0m\n", $0; fflush() 867 next 868 } 869 870 gotbad && $0 ~ bad { 871 gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;204;0;0m") 872 printf "\x1b[38;2;204;0;0m%s\x1b[0m\n", $0; fflush() 873 next 874 } 875 876 gotmeh && $0 ~ meh { 877 gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m") 878 printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush() 879 next 880 } 881 882 { print; fflush() } 883 ' "${good}" "${bad}" "${meh}" "$@" 884 } 885 886 # glue/stick together various lines, only emitting a line-feed at the end; an 887 # optional argument is the output-item-separator, which is empty by default 888 glue() { 889 local sep="${1:-}" 890 [ $# -gt 0 ] && shift 891 awk -v sep="${sep}" ' 892 NR > 1 { printf "%s", sep } 893 { gsub(/\r/, ""); printf "%s", $0; fflush() } 894 END { if (NR > 0) print ""; fflush() } 895 ' "$@" 896 } 897 898 # GO Build Stripped: a common use-case for the go compiler 899 gobs() { go build -ldflags "-s -w" -trimpath "$@"; } 900 901 # GO DEPendencieS: show all dependencies in a go project 902 godeps() { go list -f '{{ join .Deps "\n" }}' "$@"; } 903 904 # GO IMPortS: show all imports in a go project 905 goimps() { go list -f '{{ join .Imports "\n" }}' "$@"; } 906 907 # go to the folder picked using an interactive TUI; uses my script `bf` 908 goto() { 909 local where 910 where="$(bf "${1:-.}")" 911 if [ $? -ne 0 ]; then 912 return 0 913 fi 914 915 where="$(realpath "${where}")" 916 if [ ! 
-d "${where}" ]; then 917 where="$(dirname "${where}")" 918 fi 919 cd "${where}" || return 920 } 921 922 # GRayed-out lines with AWK 923 grawk() { 924 local cond="${1:-1}" 925 [ $# -gt 0 ] && shift 926 awk "${cond}"' { 927 gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m") 928 printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush() 929 next 930 } 931 { print; fflush() } 932 ' "$@" 933 } 934 935 # Style lines using a GRAY-colored BACKground 936 grayback() { 937 awk ' 938 { 939 gsub(/\x1b\[0m/, "\x1b[0m\x1b[48;2;218;218;218m") 940 printf "\x1b[48;2;218;218;218m%s\x1b[0m\n", $0; fflush() 941 } 942 ' "$@" 943 } 944 945 # Grep, Recursive Interactive and Plain 946 # grip() { ugrep -r -Q --color=never -E "$@"; } 947 948 # Global extended regex SUBstitute, using the AWK function of the same name: 949 # arguments are used as regex/replacement pairs, in that order 950 gsub() { 951 awk ' 952 BEGIN { 953 for (i = 1; i < ARGC; i++) { 954 args[++n] = ARGV[i] 955 delete ARGV[i] 956 } 957 } 958 { 959 for (i = 1; i <= n; i += 2) gsub(args[i], args[i + 1]) 960 print; fflush() 961 } 962 ' "$@" 963 } 964 965 # Highlight (lines) with AWK 966 hawk() { 967 local cond="${1:-1}" 968 [ $# -gt 0 ] && shift 969 awk ' 970 { low = lower = tolower($0) } 971 '"${cond}"' { 972 gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m") 973 printf "\x1b[7m%s\x1b[0m\n", $0 974 fflush() 975 next 976 } 977 { print; fflush() } 978 ' "$@" 979 } 980 981 # play a heartbeat-like sound lasting the number of seconds given, or for 1 982 # second by default; uses my script `waveout` 983 heartbeat() { 984 local a='sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1])' 985 local b='((12, u), (8, (u-0.25)%1))' 986 local f="sum($a for v in $b) / 2" 987 # local f='sum(sin(10*tau*exp(-20*v))*exp(-2*v) for v in (u, (u-0.25)%1))/2' 988 # local f='sum(sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1]) for v in ((12, u), (8, (u-0.25)%1)))/2' 989 waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet - 990 } 991 992 # Highlighted-style ECHO 993 hecho() { printf "\e[7m%s\e[0m\n" "$*"; } 994 995 # show each byte as a pair of HEXadecimal (base-16) symbols 996 hexify() { 997 cat "$@" | od -x -A n | 998 awk '{ gsub(/ +/, ""); printf "%s", $0; fflush() } END { printf "\n" }' 999 } 1000 1001 # HIghlighted-style ECHO 1002 hiecho() { printf "\e[7m%s\e[0m\n" "$*"; } 1003 1004 # highlight lines 1005 highlight() { 1006 awk ' 1007 { 1008 gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m") 1009 printf "\x1b[7m%s\x1b[0m\n", $0; fflush() 1010 } 1011 ' "$@" 1012 } 1013 1014 # HIghlight LEAK emits/tees input both to stdout and stderr, highlighting what 1015 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes 1016 # involving several steps 1017 hileak() { 1018 awk ' 1019 { 1020 gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") 1021 printf "\x1b[7m%s\x1b[0m\n", $0 > "/dev/stderr" 1022 print; fflush() 1023 } 1024 ' "$@" 1025 } 1026 1027 # highlight lines 1028 hilite() { 1029 awk ' 1030 { 1031 gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m") 1032 printf "\x1b[7m%s\x1b[0m\n", $0; fflush() 1033 } 1034 ' "$@" 1035 } 1036 1037 # Help Me Remember my custom shell commands 1038 hmr() { 1039 local cmd="bat" 1040 # debian linux uses a different name for the `bat` app 1041 if [ -e "/usr/bin/batcat" ]; then 1042 cmd="batcat" 1043 fi 1044 1045 "$cmd" \ 1046 --style=plain,header,numbers --theme='Monokai Extended Light' \ 1047 --wrap=never --color=always "$(which clam)" | 1048 sed -u 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' | less -JMKiCRS 1049 } 1050 1051 # convert seconds into a colon-separated Hours-Minutes-Seconds triple 1052 hms() { 1053 echo 
"${@:-0}" | sed -E 's-_--g; s- +-\n-g' | awk '/./ { 1054 x = $0 1055 h = (x - x % 3600) / 3600 1056 m = (x % 3600) / 60 1057 s = x % 60 1058 printf "%02d:%02d:%05.2f\n", h, m, s; fflush() 1059 }' 1060 } 1061 1062 # find all hyperlinks inside HREF attributes in the input text 1063 href() { 1064 awk ' 1065 BEGIN { e = "href=\"[^\"]+\"" } 1066 { 1067 for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) { 1068 print substr(s, RSTART + 6, RLENGTH - 7); fflush() 1069 } 1070 } 1071 ' "$@" 1072 } 1073 1074 # Index all lines starting from 0, using a tab right after each line number 1075 # i() { 1076 # local start="${1:-0}" 1077 # [ $# -gt 0 ] && shift 1078 # nl -b a -w 1 -v "${start}" "$@" 1079 # } 1080 1081 # Index all lines starting from 0, using a tab right after each line number 1082 i() { 1083 local start="${1:-0}" 1084 [ $# -gt 0 ] && shift 1085 stdbuf -oL nl -b a -w 1 -v "${start}" "$@" 1086 } 1087 1088 # avoid/ignore lines which case-insensitively match any of the regexes given 1089 iavoid() { 1090 awk ' 1091 BEGIN { 1092 if (IGNORECASE == "") { 1093 m = "this variant of AWK lacks case-insensitive regex-matching" 1094 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr" 1095 exit 125 1096 } 1097 IGNORECASE = 1 1098 1099 for (i = 1; i < ARGC; i++) { 1100 e[i] = ARGV[i] 1101 delete ARGV[i] 1102 } 1103 } 1104 1105 { 1106 for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next 1107 print; fflush(); got++ 1108 } 1109 1110 END { exit(got == 0) } 1111 ' "${@:-^\r?$}" 1112 } 1113 1114 # case-Insensitively DEDUPlicate prevents lines from appearing more than once 1115 idedup() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; } 1116 1117 # ignore/remove all case-insensitively matched regexes given on all stdin lines 1118 idrop() { 1119 awk ' 1120 BEGIN { 1121 if (IGNORECASE == "") { 1122 m = "this variant of AWK lacks case-insensitive regex-matching" 1123 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr" 1124 exit 125 1125 } 1126 IGNORECASE = 1 1127 1128 for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } 1129 } 1130 1131 { 1132 for (i = 1; i < ARGC; i++) gsub(e[i], "") 1133 print; fflush() 1134 } 1135 ' "${@:-\r$}" 1136 } 1137 1138 # ignore/remove all case-insensitively matched regexes given on all stdin lines 1139 ierase() { 1140 awk ' 1141 BEGIN { 1142 if (IGNORECASE == "") { 1143 m = "this variant of AWK lacks case-insensitive regex-matching" 1144 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr" 1145 exit 125 1146 } 1147 IGNORECASE = 1 1148 1149 for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } 1150 } 1151 1152 { 1153 for (i = 1; i < ARGC; i++) gsub(e[i], "") 1154 print; fflush() 1155 } 1156 ' "${@:-\r$}" 1157 } 1158 1159 # ignore command in a pipe: this allows quick re-editing of pipes, while 1160 # still leaving signs of previously-used steps, as a memo 1161 ignore() { cat; } 1162 1163 # only keep lines which case-insensitively match any of the regexes given 1164 imatch() { 1165 awk ' 1166 BEGIN { 1167 if (IGNORECASE == "") { 1168 m = "this variant of AWK lacks case-insensitive regex-matching" 1169 printf("\x1b[31m%s\x1b[0m\n", m) > "/dev/stderr" 1170 exit 125 1171 } 1172 IGNORECASE = 1 1173 1174 for (i = 1; i < ARGC; i++) { 1175 e[i] = ARGV[i] 1176 delete ARGV[i] 1177 } 1178 } 1179 1180 { 1181 for (i = 1; i < ARGC; i++) { 1182 if ($0 ~ e[i]) { 1183 print; fflush() 1184 got++ 1185 next 1186 } 1187 } 1188 } 1189 1190 END { exit(got == 0) } 1191 ' "${@:-[^\r]}" 1192 } 1193 1194 # emit each word-like item from each input line on its own line 1195 items() { awk '{ for (i = 1; i <= 

# emit each word-like item from each input line on its own line
items() { awk '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@"; }

# case-insensitively deduplicate lines, keeping them in their original order:
# the checking/matching is case-insensitive, but each first match is output
# exactly as is
iunique() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; }

# shrink/compact Json data, allowing an optional filepath
# j0() { python -m json.tool --compact "${1:--}"; }

# shrink/compact Json using the `jq` app, allowing an optional filepath, and
# even an optional transformation formula after that
# j0() { jq -c -M "${2:-.}" "${1:--}"; }

# show Json data on multiple lines, using 2 spaces for each indentation level,
# allowing an optional filepath
# j2() { python -m json.tool --indent 2 "${1:--}"; }

# show Json data on multiple lines, using 2 spaces for each indentation level,
# allowing an optional filepath, and even an optional transformation formula
# after that
# j2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }

# listen to streaming JAZZ music
jazz() {
    printf "streaming \e[7mSmooth Jazz Instrumental\e[0m\n"
    # mpv https://stream.zeno.fm/00rt0rdm7k8uv
    mpv --quiet https://stream.zeno.fm/00rt0rdm7k8uv
}

# show a `dad` JOKE from the web, sometimes even a very funny one
joke() {
    curl -s https://icanhazdadjoke.com | fold -s | sed -u -E 's- *\r?$--'
    # plain-text output from previous cmd doesn't end with a line-feed
    printf "\n"
}

# shrink/compact JSON data, allowing an optional filepath
# json0() { python -m json.tool --compact "${1:--}"; }

# shrink/compact JSON using the `jq` app, allowing an optional filepath, and
# even an optional transformation formula after that
json0() { jq -c -M "${2:-.}" "${1:--}"; }

# show JSON data on multiple lines, using 2 spaces for each indentation level,
# allowing an optional filepath
# json2() { python -m json.tool --indent 2 "${1:--}"; }

# show JSON data on multiple lines, using 2 spaces for each indentation level,
# allowing an optional filepath, and even an optional transformation formula
# after that
json2() { jq --indent 2 -M "${2:-.}" "${1:--}"; }

# turn JSON Lines into a proper JSON array
jsonl2json() { jq -s -M "${@:-.}"; }

# emit the given number of random/junk bytes, or 1024 junk bytes by default
junk() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" /dev/urandom; }

# only keep the file-extension part from lines ending with file-extensions
# justext() {
#     awk '
#     !/^\./ && /\./ { gsub(/^.+\.+/, ""); printf ".%s\n", $0; fflush() }
#     ' "$@"
# }

# only keep the file-extension part from lines ending with file-extensions
justext() {
    awk '
    !/^\./ && /\./ {
        if (match($0, /((\.[A-Za-z0-9]+)+) *\r?$/)) {
            print substr($0, RSTART, RLENGTH); fflush()
        }
    }
    ' "$@"
}

# only keep lines ending with a file-extension of any popular picture format
justpictures() {
    awk '
    /.\.(bmp|gif|heic|ico|jfif|jpe?g|png|svg|tiff?|webp) *\r?$/ {
        gsub(/ *\r?$/, ""); print; fflush()
    }
    ' "$@"
}

# only keep lines ending with a file-extension of any popular sound format
justsounds() {
    awk '
    /.\.(aac|aif[cf]?|au|flac|m4a|m4b|mp[23]|ogg|snd|wav|wma) *\r?$/ {
        gsub(/ *\r?$/, ""); print; fflush()
    }
    ' "$@"
}

# only keep lines ending with a file-extension of any popular video format
justvideos() {
    awk '
    /.\.(avi|mkv|mov|mp4|mpe?g|ogv|webm|wmv) *\r?$/ {
        gsub(/ *\r?$/, ""); print; fflush()
    }
    ' "$@"
}

# convert binary KiloBytes into bytes
kb() {
    echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
        awk '/./ { printf "%.2f\n", 1024 * $0; fflush() }' |
        sed 's-\.00*$--'
}

# run `less`, showing line numbers, among other settings
l() { less -JMKNiCRS "$@"; }

# Like A Book groups lines as 2 side-by-side pages, the same way books
# do it; uses my script `book`
lab() { book "$(($(tput lines) - 1))" "$@" | less -JMKiCRS; }

# find the LAN (local-area network) IP address for this device
lanip() { hostname -I; }

# Line xARGS: `xargs` using line separators, which handles filepaths
# with spaces, as long as the standard input has 1 path per line
largs() { xargs -d '\n' "$@"; }

# get the last n lines, or 1 by default
# last() { tail -n "${1:-1}" "${2:--}"; }

# get up to the last given number of bytes
lastbytes() { tail -c "${1:-1}" "${2:--}"; }

# get the last n lines, or 1 by default
lastlines() { tail -n "${1:-1}" "${2:--}"; }

# turn UTF-8 into its latin-like subset, where variants of latin letters stay
# as given, and where all other symbols become question marks, one question
# mark for each code-point byte
latinize() {
    iconv -f utf-8 -t latin-1//translit "$@" | iconv -f latin-1 -t utf-8
}

# Lowercased (lines) AWK
lawk() {
    local code="${1:-1}"
    [ $# -gt 0 ] && shift
    awk "
    {
        line = orig = original = \$0
        low = lower = tolower(\$0)
        \$0 = lower
    }
    ${code}
    { fflush() }
    " "$@";
}

# convert pounds (LB) into kilograms
lb() {
    echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
        awk '/./ { printf "%.2f\n", 0.45359237 * $0; fflush() }'
}

# turn the first n space-separated fields on each line into tab-separated
# ones; this behavior is useful to make the output of many cmd-line tools
# into TSV, since filenames are usually the last fields, and these may
# contain spaces which aren't meant to be split into different fields
leadtabs() {
    local n="$1"
    local cmd="$([ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "")"
    cmd="s-^ *--; s- *\\r?\$--; $(echo "${cmd}" | sed 's/ /s- +-\\t-1;/g')"
    sed -u -E "${cmd}"
}

# run `less`, showing line numbers, among other settings
least() { less -JMKNiCRS "$@"; }

# limit stops at the first n bytes, or 1024 bytes by default
limit() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" "${2:--}"; }

# Less with Header runs `less` with line numbers, ANSI styles, no line-wraps,
# and using the first n lines as a sticky-header (1 by default), so they
# always show on top
lh() {
    local n="${1:-1}"
    [ $# -gt 0 ] && shift
    less --header="$n" -JMKNiCRS "$@"
}

# fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's
# first line, turning all end-of-line CRLF byte-pairs into single line-feeds,
# and ensuring each input's last line ends with a line-feed
lines() {
    awk '
    FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
    { gsub(/\r$/, ""); print; fflush() }
    ' "$@"
}
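
# illustrative example for `leadtabs` above: the line below turns the first 8
# runs of spaces on each `ls -l` output line into single tabs, which keeps any
# spaces in the filenames themselves intact
#
#   ls -l | leadtabs 8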

# regroup adjacent lines into n-item tab-separated lines
lineup() {
    local n="${1:-0}"
    [ $# -gt 0 ] && shift

    if [ "$n" -le 0 ]; then
        awk '
        NR > 1 { printf "\t" }
        { printf "%s", $0; fflush() }
        END { if (NR > 0) print "" }
        ' "$@"
        return $?
    fi

    awk -v n="$n" '
    NR % n != 1 && n > 1 { printf "\t" }
    { printf "%s", $0; fflush() }
    NR % n == 0 { print ""; fflush() }
    END { if (NR % n != 0) print "" }
    ' "$@"
}

# find all hyperLINKS (https:// and http://) in the input text
links() {
    awk '
    BEGIN { e = "https?://[A-Za-z0-9+_.:%-]+(/[A-Za-z0-9+_.%/,#?&=-]*)*" }
    {
        # match all links in the current line
        for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) {
            print substr(s, RSTART, RLENGTH); fflush()
        }
    }
    ' "$@"
}

# List files, using the `Long` option
# ll() { ls -l "$@"; }

# LOAD data from the filename or URI given; uses my script `get`
load() { get "$@"; }

# LOwercase line, check (awk) COndition: on each success, the original line
# is output with its original letter-casing, as its lower-cased version is
# only a convenience meant for the condition
loco() {
    local cond="${1:-1}"
    [ $# -gt 0 ] && shift
    awk "
    {
        line = orig = original = \$0
        low = lower = tolower(\$0)
        \$0 = lower
    }
    ${cond} { print line; fflush() }
    " "$@"
}

# LOcal SERver webserves files in a folder as localhost, using the port
# number given, or port 8080 by default
loser() {
    printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
    python3 -m http.server "${1:-8080}" -d "${2:-.}"
}

# LOWercase all ASCII symbols
low() { awk '{ print tolower($0); fflush() }' "$@"; }

# LOWERcase all ASCII symbols
lower() { awk '{ print tolower($0); fflush() }' "$@"; }

# Live/Line-buffered RipGrep ensures results show/pipe up immediately
lrg() { rg --line-buffered "$@"; }

# Listen To Youtube
lty() {
    local url
    # some youtube URIs end with extra playlist/tracker parameters
    url="$(echo "$1" | sed 's-&.*--')"
    mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
}

# Match lines with any of the regexes given
alias m=match

# only keep lines which match any of the regexes given
match() {
    awk '
    BEGIN {
        for (i = 1; i < ARGC; i++) {
            e[i] = ARGV[i]
            delete ARGV[i]
        }
    }

    {
        for (i = 1; i < ARGC; i++) {
            if ($0 ~ e[i]) {
                print; fflush()
                got++
                next
            }
        }
    }

    END { exit(got == 0) }
    ' "${@:-[^\r]}"
}

# MAX Width truncates lines up to the number of characters/bytes given, or up
# to 80 by default; output lines end with an ANSI reset-code, in case input
# lines use ANSI styles
maxw() {
    local maxwidth="${1:-80}"
    [ $# -gt 0 ] && shift
    awk -v maxw="${maxwidth}" '
    {
        gsub(/\r$/, "")
        printf("%s\x1b[0m\n", substr($0, 1, maxw)); fflush()
    }
    ' "$@"
}

# convert binary MegaBytes into bytes
mb() {
    echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
        awk '/./ { printf "%.2f\n", 1048576 * $0; fflush() }' |
        sed 's-\.00*$--'
}

# Multi-Core MAKE runs `make` using all cores
mcmake() { make -j "$(nproc)" "$@"; }

# Multi-Core MaKe runs `make` using all cores
mcmk() { make -j "$(nproc)" "$@"; }
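
# illustrative example for `lineup` above: the line below regroups the 6 input
# lines into 2 output lines, each with 3 tab-separated items
#
#   seq 6 | lineup 3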

# merge stderr into stdout, without any ugly keyboard-dancing
# merrge() { "$@" 2>&1; }

# convert MIles into kilometers
mi() {
    echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
        awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }'
}

# convert MIles² (squared) into kilometers²
mi2() {
    echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
        awk '/./ { printf "%.2f\n", 2.5899881103360 * $0 }'
}

# Make In Folder
mif() {
    local code
    pushd "${1:-.}" > /dev/null || return
    [ $# -gt 0 ] && shift
    make "$@"
    code=$?
    popd > /dev/null || return "${code}"
    return "${code}"
}

# Media INFO
# minfo() { mediainfo "$@" | less -JMKiCRS; }

# Media INFO
# minfo() { ffprobe "$@" |& less -JMKiCRS; }

# quick alias for `make`
# alias mk=make

# run `make`
mk() { make "$@"; }

# convert Miles Per Hour into kilometers per hour
mph() {
    echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
        awk '/./ { printf "%.2f\n", 1.609344 * $0 }'
}

# Number all lines, using a tab right after each line number
# n() {
#     local start="${1:-1}"
#     [ $# -gt 0 ] && shift
#     nl -b a -w 1 -v "${start}" "$@"
# }

# Number all lines, using a tab right after each line number
n() {
    local start="${1:-1}"
    [ $# -gt 0 ] && shift
    stdbuf -oL nl -b a -w 1 -v "${start}" "$@"
}

# Not AND sorts its 2 inputs, then finds lines not in common
nand() {
    # comm -3 <(sort "$1") <(sort "$2")
    # dash doesn't support the process-sub syntax
    (sort "$1" | (sort "$2" | (comm -3 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
}

# Nice Byte Count, using my scripts `nn` and `cext`
nbc() { wc -c "$@" | nn --gray | cext; }

# NIce(r) COlumns makes the output of many commands whose output starts with
# a header line easier to read; uses my script `nn`
nico() {
    awk '
    (NR - 1) % 5 == 1 && NR > 1 { print "" }
    { printf "%5d %s\n", NR - 1, $0; fflush() }
    ' "$@" | nn --gray | less -JMKiCRS
}

# emit nothing to output and/or discard everything from input
nil() {
    if [ $# -gt 0 ]; then
        "$@" > /dev/null
    else
        cat < /dev/null
    fi
}

# pipe-run my scripts `nj` (Nice Json) and `nn` (Nice Numbers)
njnn() { nj "$@" | nn --gray; }

# convert Nautical MIles into kilometers
nmi() {
    echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' |
        awk '/./ { printf "%.2f\n", 1.852 * $0; fflush() }'
}

# NO (standard) ERRor ignores stderr, without any ugly keyboard-dancing
# noerr() { "$@" 2> /dev/null; }

# play a white-noise sound lasting the number of seconds given, or for 1
# second by default; uses my script `waveout`
noise() { waveout "${1:-1}" "${2:-0.05} * random()" | mpv --really-quiet -; }

# show the current date and time
now() { date +'%Y-%m-%d %H:%M:%S'; }
1640 if [ "${code}" -ne 0 ]; then 1641 return "${code}" 1642 fi 1643 1644 echo "${res}" | awk ' 1645 BEGIN { 1646 d = strftime("%a %b %d") 1647 t = strftime("%H:%M:%S") 1648 # printf "%s %s\n\n", d, t 1649 # printf "\x1b[32m%s\x1b[0m \x1b[34m%s\x1b[0m\n\n", d, t 1650 # printf "%30s\x1b[32m%s\x1b[0m \x1b[34m%s\x1b[0m\n\n", "", d, t 1651 # printf "%30s%s %s\n\n", "", d, t 1652 printf "\x1b[7m%30s%s %s%30s\x1b[0m\n\n", "", d, t, "" 1653 } 1654 1655 (NR - 1) % 5 == 1 && NR > 1 { print "" } 1656 1657 $1 == "root" { 1658 # gsub(/^/, "\x1b[36m") 1659 # gsub(/\x1b\[0m/, "\x1b[0m\x1b[36m") 1660 gsub(/^/, "\x1b[34m") 1661 gsub(/ +/, "&\x1b[0m\x1b[34m") 1662 gsub(/$/, "\x1b[0m") 1663 } 1664 1665 { 1666 gsub(/ \? /, "\x1b[38;2;135;135;175m&\x1b[0m") 1667 gsub(/0[:\.]00*/, "\x1b[38;2;135;135;175m&\x1b[0m") 1668 printf "%3d %s\n", NR - 1, $0 1669 } 1670 ' | nn --gray | less -JMKiCRS 1671 } 1672 1673 # Nice Size, using my scripts `nn` and `cext` 1674 ns() { wc -c "$@" | nn --gray | cext; } 1675 1676 # Nice Transform Json, using my scripts `tj`, and `nj` 1677 ntj() { tj "$@" | nj; } 1678 1679 # Nice TimeStamp 1680 nts() { 1681 ts '%Y-%m-%d %H:%M:%S' | 1682 sed -u 's-^-\x1b[48;2;218;218;218m\x1b[38;2;0;95;153m-; s- -\x1b[0m\t-2' 1683 } 1684 1685 # emit nothing to output and/or discard everything from input 1686 null() { 1687 if [ $# -gt 0 ]; then 1688 "$@" > /dev/null 1689 else 1690 cat < /dev/null 1691 fi 1692 } 1693 1694 # NULl-terminate LINES ends each stdin line with a null byte, instead of a 1695 # line-feed byte 1696 nullines() { 1697 awk ' 1698 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 1699 { gsub(/\r$/, ""); printf "%s\x00", $0; fflush() } 1700 ' "$@" 1701 } 1702 1703 # (Nice) What Are These (?) shows what the names given to it are/do, coloring 1704 # the syntax of shell functions 1705 nwat() { 1706 local a 1707 1708 if [ $# -eq 0 ]; then 1709 printf "\e[38;2;204;0;0mnwat: no names given\e[0m\n" > /dev/stderr 1710 return 1 1711 fi 1712 1713 local cmd="bat" 1714 # debian linux uses a different name for the `bat` app 1715 if [ -e "/usr/bin/batcat" ]; then 1716 cmd="batcat" 1717 fi 1718 1719 for a in "$@"; do 1720 # printf "\e[7m%-80s\e[0m\n" "$a" 1721 printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a" 1722 1723 # resolve 1 alias level 1724 if alias "$a" 2> /dev/null > /dev/null; then 1725 a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")" 1726 fi 1727 1728 if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then 1729 # resolved aliases with args/spaces in them would otherwise fail 1730 echo "$a" 1731 elif whence -f "$a" > /dev/null 2> /dev/null; then 1732 # zsh seems to show a shell function's code only via `whence -f` 1733 whence -f "$a" 1734 elif type "$a" > /dev/null 2> /dev/null; then 1735 # dash doesn't support `declare`, and `type` in bash emits 1736 # a redundant first output line, when it's a shell function 1737 type "$a" | awk ' 1738 NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next } 1739 { print; fflush() } 1740 END { if (NR < 2 && skipped) print skipped } 1741 ' | "$cmd" -l sh --style=plain --theme='Monokai Extended Light' \ 1742 --wrap=never --color=always | 1743 sed -u 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' 1744 else 1745 printf "\e[38;2;204;0;0m%s not found\e[0m\n" "$a" 1746 fi 1747 done | less -JMKiCRS 1748 } 1749 1750 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`, 1751 # alternating styles to make long numbers easier to read 1752 # nwc() { wc "$@" | nn --gray; } 1753 1754 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`, 1755 # 

# Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
# alternating styles to make long numbers easier to read
# nwc() { wc "$@" | nn --gray; }

# Nice numbers Word-Count runs `wc` and colors results with my script `nn`,
# alternating styles to make long numbers easier to read
# nwc() { wc "$@" | nn --gray | awk '{ printf "%5d %s\n", NR, $0; fflush() }'; }

# Nice Word-Count runs `wc` and colors results, using my scripts `nn` and
# `cext`, alternating styles to make long numbers easier to read
nwc() {
    wc "$@" | sort -rn | nn --gray | cext |
        awk '{ printf "%5d %s\n", NR - 1, $0; fflush() }'
}

# Nice Zoom Json, using my scripts `zj` and `nj`
nzj() { zj "$@" | nj; }

# Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
# pawk() { awk -F='' -v RS='' "$@"; }

# Paragraph AWK runs `awk` in block/paragraph/multiline input-mode
pawk() { stdbuf -oL awk -F='' -v RS='' "$@"; }

# Plain `fd`
pfd() { fd --color=never "$@"; }

# pick lines, using all the 1-based line-numbers given
picklines() {
    awk '
    BEGIN { m = ARGC - 1; if (ARGC == 1) exit 0 }
    BEGIN { for (i = 1; i <= m; i++) { p[i] = ARGV[i]; delete ARGV[i] } }
    { l[++n] = $0 }
    END {
        for (i = 1; i <= m; i++) {
            j = p[i]
            if (j < 0) j += NR + 1
            if (0 < j && j <= NR) print l[j]
        }
    }
    ' "$@"
}

# Plain Interactive Grep
pig() { ugrep --color=never -Q -E "$@"; }

# make text plain, by ignoring ANSI terminal styling
# plain() {
#     awk '
#     {
#         gsub(/\x1b\[[0-9;]*[A-Za-z]/, "")  # ANSI style-changers
#         print; fflush()
#     }
#     ' "$@"
# }

# end all lines with an ANSI-code to reset styles
plainend() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }

# end all lines with an ANSI-code to reset styles
plainends() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; }

# play audio/video media
# play() { mplayer -msglevel all=-1 "${@:--}"; }

# play audio/video media
play() { mpv "${@:--}"; }

# Pick LINE, using the 1-based line-number given
pline() {
    local line="$1"
    [ $# -gt 0 ] && shift
    awk -v n="${line}" '
    BEGIN { if (n < 1) exit 0 }
    NR == n { print; exit 0 }
    ' "$@"
}

# Paused MPV; especially useful when trying to view pictures via `mpv`
pmpv() { mpv --pause "${@:--}"; }

# Print Python result
pp() { python -c "print($1)"; }

# PRecede (input) ECHO, prepends a first line to stdin lines
precho() { echo "$@" && cat /dev/stdin; }

# PREcede (input) MEMO, prepends a first highlighted line to stdin lines
prememo() {
    awk '
    BEGIN {
        if (ARGC > 1) printf "\x1b[7m"
        for (i = 1; i < ARGC; i++) {
            if (i > 1) printf " "
            printf "%s", ARGV[i]
            delete ARGV[i]
        }
        if (ARGC > 1) printf "\x1b[0m\n"
        fflush()
    }
    { print; fflush() }
    ' "$@"
}

# start by joining all arguments given as a tab-separated-items line of output,
# followed by all lines from stdin verbatim
pretsv() {
    awk '
    BEGIN {
        for (i = 1; i < ARGC; i++) {
            if (i > 1) printf "\t"
            printf "%s", ARGV[i]
            delete ARGV[i]
        }
        if (ARGC > 1) printf "\n"
        fflush()
    }
    { print; fflush() }
    ' "$@"
}

# Plain Recursive Interactive Grep
prig() { ugrep --color=never -r -Q -E "$@"; }
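
# illustrative example for `picklines` above: negative line-numbers count from
# the end, so the line below emits the first and last lines of the
# (hypothetical) file notes.txt
#
#   picklines 1 -1 notes.txt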

# show/list all current processes
processes() {
    local res
    res="$(ps aux)"
    echo "${res}" | awk 1 | sed -E -u \
        -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' \
        -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1'
}

# Play Youtube Audio
pya() {
    local url
    # some youtube URIs end with extra playlist/tracker parameters
    url="$(echo "$1" | sed 's-&.*--')"
    mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
}

# Quiet ignores stderr, without any ugly keyboard-dancing
q() { "$@" 2> /dev/null; }

# Quiet MPV
qmpv() { mpv --quiet "${@:--}"; }

# ignore stderr, without any ugly keyboard-dancing
quiet() { "$@" 2> /dev/null; }

# Reset the screen, which empties it and resets the current style
alias r=reset

# keep only lines between the 2 line numbers given, inclusively
rangelines() {
    { [ "$#" -eq 2 ] || [ "$#" -eq 3 ]; } && [ "${1}" -le "${2}" ] &&
        { tail -n +"${1:-1}" "${3:--}" | head -n "$(("${2}" - "${1}" + 1))"; }
}

# RANdom MANual page
ranman() {
    find "/usr/share/man/man${1:-1}" -type f | shuf -n 1 | xargs basename |
        sed 's-\.gz$--' | xargs man
}

# Run AWK expression
rawk() {
    local expr="${1:-0}"
    [ $# -gt 0 ] && shift
    awk "BEGIN { print ${expr}; exit }" "$@"
}

# play a ready-phone-line sound lasting the number of seconds given, or for 1
# second by default; uses my script `waveout`
ready() {
    local f='0.5 * sin(350*tau*t) + 0.5 * sin(450*tau*t)'
    waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
}

# reflow/trim lines of prose (text) to improve its legibility: it's especially
# useful when the text is pasted from web-pages being viewed in reader mode
reprose() {
    local w="${1:-80}"
    [ $# -gt 0 ] && shift
    awk 'FNR == 1 && NR > 1 { print "" } { print; fflush() }' "$@" |
        fold -s -w "$w" | sed -u -E 's- *\r?$--'
}

# ignore ansi styles from stdin and restyle things using the style-name given;
# uses my script `style`
restyle() { style "$@"; }

# change the tab-title on your terminal app
retitle() { printf "\e]0;%s\a\n" "$*"; }

# REVerse-order SIZE (byte-count)
revsize() { wc -c "$@" | sort -rn; }

# Run In Folder
rif() {
    local code
    pushd "${1:-.}" > /dev/null || return
    [ $# -gt 0 ] && shift
    "$@"
    code=$?
    popd > /dev/null || return "${code}"
    return "${code}"
}

# play a ringtone-style sound lasting the number of seconds given, or for 1
# second by default; uses my script `waveout`
ringtone() {
    local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))'
    waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
}

# Read-Only Micro (text editor)
rom() { micro -readonly true "$@"; }

# run the command given, trying to turn its output into TSV (tab-separated
# values); uses my script `dejson`
rtab() { jc "$@" | dejson; }

# Right TRIM ignores trailing spaces, as well as trailing carriage returns
rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }

# show a RULER-like width-measuring line
ruler() {
    [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \
        's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
}

# run the command given, trying to turn its output into TSV (tab-separated
# values); uses my script `dejson`
runtab() { jc "$@" | dejson; }

# run the command given, trying to turn its output into TSV (tab-separated
# values); uses my script `dejson`
runtsv() { jc "$@" | dejson; }

# Reverse-order WC
rwc() { wc "$@" | sort -rn; }

# extended-mode Sed, enabling its full regex syntax
# s() { sed -E -u "$@"; }

# Silent CURL spares you the progress bar, but still tells you about errors
scurl() { curl --show-error -s "$@"; }

# show a unique-looking SEParator line; useful to run between commands
# which output walls of text
sep() {
    [ "${1:-80}" -gt 0 ] &&
        printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g'
}

# webSERVE files in a folder as localhost, using the port number given, or
# port 8080 by default
serve() {
    printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
    python3 -m http.server "${1:-8080}" -d "${2:-.}"
}

# SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input
setdiff() {
    # comm -23 <(sort "$1") <(sort "$2")
    # dash doesn't support the process-sub syntax
    (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
}

# SET INtersection, sorts its 2 inputs, then finds common lines
setin() {
    # comm -12 <(sort "$1") <(sort "$2")
    # dash doesn't support the process-sub syntax
    (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
}

# SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input
setsub() {
    # comm -23 <(sort "$1") <(sort "$2")
    # dash doesn't support the process-sub syntax
    (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
}

# Show Files (and folders), coloring folders and links; uses my script `nn`
sf() {
    ls -al --file-type --color=never --time-style iso "$@" | awk '
    (NR - 1) % 5 == 1 && NR > 1 { print "" }
    {
        gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
        gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
        printf "%6d %s\n", NR - 1, $0; fflush()
    }
    ' | nn --gray | less -JMKiCRS
}

# Show Files (and folders) Plus, by coloring folders, links, and extensions;
# uses my scripts `nn` and `cext`
sfp() {
    ls -al --file-type --color=never --time-style iso "$@" | awk '
    (NR - 1) % 5 == 1 &&

# play a ringtone-style sound lasting the number of seconds given, or for 1
# second by default; uses my script `waveout`
ringtone() {
    local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))'
    waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet -
}

# Read-Only Micro (text editor)
rom() { micro -readonly true "$@"; }

# run the command given, trying to turn its output into TSV (tab-separated
# values); uses my script `dejson`
rtab() { jc "$@" | dejson; }

# Right TRIM ignores trailing spaces, as well as trailing carriage returns
rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }

# show a RULER-like width-measuring line
ruler() {
    [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \
        's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-'
}

# run the command given, trying to turn its output into TSV (tab-separated
# values); uses my script `dejson`
runtab() { jc "$@" | dejson; }

# run the command given, trying to turn its output into TSV (tab-separated
# values); uses my script `dejson`
runtsv() { jc "$@" | dejson; }

# Reverse-order WC
rwc() { wc "$@" | sort -rn; }

# extended-mode Sed, enabling its full regex syntax
# s() { sed -E -u "$@"; }

# Silent CURL spares you the progress bar, but still tells you about errors
scurl() { curl --show-error -s "$@"; }

# show a unique-looking SEParator line; useful to run between commands
# which output walls of text
sep() {
    [ "${1:-80}" -gt 0 ] &&
        printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g'
}

# webSERVE files in a folder as localhost, using the port number given, or
# port 8080 by default
serve() {
    printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2
    python3 -m http.server "${1:-8080}" -d "${2:-.}"
}

# SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input
setdiff() {
    # comm -23 <(sort "$1") <(sort "$2")
    # dash doesn't support the process-sub syntax
    (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
}

# SET INtersection, sorts its 2 inputs, then finds common lines
setin() {
    # comm -12 <(sort "$1") <(sort "$2")
    # dash doesn't support the process-sub syntax
    (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
}

# SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input
setsub() {
    # comm -23 <(sort "$1") <(sort "$2")
    # dash doesn't support the process-sub syntax
    (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0)
}

# Show Files (and folders), coloring folders and links; uses my script `nn`
sf() {
    ls -al --file-type --color=never --time-style iso "$@" | awk '
        (NR - 1) % 5 == 1 && NR > 1 { print "" }
        {
            gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
            gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
            printf "%6d %s\n", NR - 1, $0; fflush()
        }
    ' | nn --gray | less -JMKiCRS
}

# Show Files (and folders) Plus, by coloring folders, links, and extensions;
# uses my scripts `nn` and `cext`
sfp() {
    ls -al --file-type --color=never --time-style iso "$@" | awk '
        (NR - 1) % 5 == 1 && NR > 1 { print "" }
        {
            gsub(/^(d[rwx-]+)/, "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m")
            gsub(/^(l[rwx-]+)/, "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m")
            printf "%6d %s\n", NR - 1, $0; fflush()
        }
    ' | nn --gray | cext | less -JMKiCRS
}
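
# The `setdiff`/`setin`/`setsub` functions above emulate bash's process
# substitution with plain pipes and /dev/fd handles, so they also work on
# `dash`: each `sort` feeds one of the numbered descriptors `comm` reads.
# A commented-out sketch, using made-up filenames:
#
# setin installed.txt wanted.txt     # lines present in both files
# setdiff wanted.txt installed.txt   # lines only in the 1st file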

# Show File Sizes, using my scripts `nn` and `cext`
sfs() {
    # turn arg-list into single-item lines
    printf "%s\n" "$@" |
    # calculate file-sizes, and reverse-sort results
    xargs -d '\n' wc -c | sort -rn |
    # add/realign fields to improve legibility
    awk '
        # start output with a header-like line, and add a MiB field
        BEGIN { printf "%6s %10s %8s name\n", "n", "bytes", "MiB"; fflush() }
        # make table breathe with empty lines, so tall outputs are readable
        (NR - 1) % 5 == 1 && NR > 1 { print "" }
        # emit regular output lines
        {
            printf "%6d %10d %8.2f ", NR - 1, $1, $1 / 1048576
            # first field is likely space-padded
            gsub(/^ */, "")
            # slice line after the first field, as filepaths can have spaces
            $0 = substr($0, length($1) + 1)
            # drop the single space still separating the first field from the name
            gsub(/^ /, "")
            printf "%s\n", $0; fflush()
        }
    ' |
    # make zeros in the MiB field stand out with a special color
    awk '
        {
            gsub(/ 00*\.00* /, "\x1b[38;2;135;135;175m&\x1b[0m")
            print; fflush()
        }
    ' |
    # make numbers nice, alternating styles along 3-digit groups
    nn --gray |
    # color-code file extensions
    cext |
    # make result interactively browsable
    less -JMKiCRS
}

# SHell-run AWK output
# shawk() { stdbuf -oL awk "$@" | sh; }

# benchmark the tools/commands given one-per-line from stdin, appending any
# common arguments given explicitly to each of them; uses `hyperfine`
showdown() {
    awk '
        BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } }
        {
            printf "%s", $0
            for (i = 1; i < ARGC; i++) printf " %s", a[i]
            printf "\n"; fflush()
        }
    ' "$@" | xargs -d '\n' hyperfine --style full
}

# SHOW a command, then RUN it
showrun() { printf "\e[7m%s\e[0m\n" "$*" && "$@"; }

# skip the first n lines, or the 1st line by default
skip() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }

# skip the first n bytes
skipbytes() { tail -c +$(("$1" + 1)) "${2:--}"; }

# skip the last n lines, or the last line by default
skiplast() { head -n -"${1:-1}" "${2:--}"; }

# skip the last n bytes
skiplastbytes() { head -c -"$1" "${2:--}"; }

# skip the last n lines, or the last line by default
skiplastlines() { head -n -"${1:-1}" "${2:--}"; }

# skip the first n lines, or the 1st line by default
skiplines() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; }

# SLOW/delay lines from the standard-input, waiting the number of seconds
# given for each line, or waiting 1 second by default
slow() {
    local seconds="${1:-1}"
    (
        IFS="$(printf "\n")"
        while read -r line; do
            sleep "${seconds}"
            printf "%s\n" "${line}"
        done
    )
}

# Show Latest Podcasts, using my scripts `podfeed` and `si`
slp() {
    local title
    title="Latest Podcast Episodes as of $(date +'%F %T')"
    podfeed -title "${title}" "$@" | si
}

# recursively find all files with fewer bytes than the number given
smallfiles() {
    local n
    n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')"
    [ $# -gt 0 ] && shift

    local arg
    for arg in "${@:-.}"; do
        if [ ! -d "${arg}" ]; then
            printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
            return 1
        fi
        stdbuf -oL find "${arg}" -type f -size -"$n"c
    done
}
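
# A commented-out example of `showdown` above: each stdin line names one
# command, the explicit arguments are appended to each of them, and the
# resulting command-lines are benchmarked against each other; the pattern and
# the filename below are made up, and `hyperfine` has to be installed.
#
# printf '%s\n' 'grep -c' 'rg -c' | showdown 'error' app.log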

# emit the first line as is, sorting all lines after that, using the
# `sort` command, passing all/any arguments/options to it
sortrest() {
    awk -v sort="sort $*" '
        { gsub(/\r$/, "") }
        NR == 1 { print; fflush() }
        NR > 1 { print | sort }
    '
}

# SORt Tab-Separated Values: emit the first line as is, sorting all lines after
# that, using the `sort` command in TSV (tab-separated values) mode, passing
# all/any arguments/options to it
sortsv() {
    awk -v sort="sort -t \"$(printf '\t')\" $*" '
        { gsub(/\r$/, "") }
        NR == 1 { print; fflush() }
        NR > 1 { print | sort }
    '
}

# emit a line with the number of spaces given in it
spaces() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" ""; }

# ignore leading spaces, trailing spaces, even runs of multiple spaces
# in the middle of lines, as well as trailing carriage returns
squeeze() {
    awk '
        FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
        {
            gsub(/^ +| *\r?$/, "")
            gsub(/ *\t */, "\t")
            gsub(/ +/, " ")
            print; fflush()
        }
    ' "$@"
}

# SQUeeze and stOMP, by ignoring leading spaces, trailing spaces, even runs
# of multiple spaces in the middle of lines, as well as trailing carriage
# returns, while also turning runs of empty lines into single empty lines,
# and ignoring leading/trailing empty lines, effectively also `squeezing`
# lines vertically
squomp() {
    awk '
        FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
        /^\r?$/ { empty = 1; next }
        empty { if (n > 0) print ""; empty = 0 }
        {
            gsub(/^ +| *\r?$/, "")
            gsub(/ *\t */, "\t")
            gsub(/ +/, " ")
            print; fflush()
            n++
        }
    ' "$@"
}

# Show a command, then Run it
sr() { printf "\e[7m%s\e[0m\n" "$*" && "$@"; }

# turn runs of empty lines into single empty lines, effectively squeezing
# paragraphs vertically, so to speak; runs of empty lines both at the start
# and at the end are ignored
stomp() {
    awk '
        /^\r?$/ { empty = 1; next }
        empty { if (n > 0) print ""; empty = 0 }
        { print; fflush(); n++ }
    ' "$@"
}

# STRike-thru (lines) with AWK
strawk() {
    local cond="${1:-1}"
    [ $# -gt 0 ] && shift
    awk '
        { low = lower = tolower($0) }
        '"${cond}"' {
            gsub(/\x1b\[0m/, "\x1b[0m\x1b[9m")
            printf "\x1b[9m%s\x1b[0m\n", $0
            fflush()
            next
        }
        { print; fflush() }
    ' "$@"
}

# Sort Tab-Separated Values: emit the first line as is, sorting all lines after
# that, using the `sort` command in TSV (tab-separated values) mode, passing
# all/any arguments/options to it
stsv() {
    awk -v sort="sort -t \"$(printf '\t')\" $*" '
        { gsub(/\r$/, "") }
        NR == 1 { print; fflush() }
        NR > 1 { print | sort }
    '
}

# use the result of the `awk` function `substr` for each line
substr() {
    local start="${1:-1}"
    local length="${2:-80}"
    [ $# -gt 0 ] && shift
    [ $# -gt 0 ] && shift
    awk -v start="${start}" -v len="${length}" \
        '{ printf "%s\n", substr($0, start, len); fflush() }' "$@"
}
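
# `sortrest` and `sortsv`/`stsv` above keep the first line where it is, which
# makes them handy for tables with header lines; any options given are passed
# straight to `sort`. A commented-out sketch, with made-up values:
#
# printf 'name\tsize\napp\t250\nnotes\t12\n' | sortsv -rnk2   # sort by size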

# turn SUDo privileges OFF right away: arguments also cause `sudo` to run with
# what's given, before relinquishing existing privileges
# sudoff() {
#     local code=0
#     if [ $# -gt 0 ]; then
#         sudo "$@"
#         code=$?
#     fi
#     sudo -k
#     return "${code}"
# }

# append a final Tab-Separated-Values line with the sums of all columns from
# the input table(s) given; items from first lines aren't counted/added
sumtsv() {
    awk -F "\t" '
        {
            print; fflush()
            if (width < NF) width = NF
        }

        FNR > 1 { for (i = 1; i <= NF; i++) sums[i] += $i + 0 }

        END {
            for (i = 1; i <= width; i++) {
                if (i > 1) printf "\t"
                printf "%s", sums[i] ""
            }
            if (width > 0) printf "\n"
        }
    ' "$@"
}

# show a random command defined in `clam`, using `wat` from `clam` itself
surprise() {
    wat "$(grep -E '^[a-z]+\(' "$(which clam)" | shuf -n 1 | sed -E 's-\(.*--')"
}

# Time the command given
t() { time "$@"; }

# show a reverse-sorted tally of all lines read, where ties are sorted
# alphabetically
tally() {
    awk -v sort="sort -t \"$(printf '\t')\" -rnk2 -k1d" '
        # reassure users by instantly showing the header
        BEGIN { print "value\ttally"; fflush() }
        { gsub(/\r$/, ""); t[$0]++ }
        END { for (k in t) { printf("%s\t%d\n", k, t[k]) | sort } }
    ' "$@"
}

# Tab AWK: TSV-specific I/O settings for `awk`
# tawk() { awk -F "\t" -v OFS="\t" "$@"; }

# Tab AWK: TSV-specific I/O settings for `awk`
tawk() { stdbuf -oL awk -F "\t" -v OFS="\t" "$@"; }

# quick alias for my script `tbp`
alias tb=tbp

# Title ECHO changes the tab-title on your terminal app
techo() { printf "\e]0;%s\a\n" "$*"; }

# simulate the cadence of old-fashioned teletype machines, by slowing down
# the output of ASCII/UTF-8 symbols from the standard-input
teletype() {
    awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | (
        IFS="$(printf "\n")"
        while read -r line; do
            echo "${line}" | sed -E 's-(.)-\1\n-g' |
                while read -r item; do
                    sleep 0.01
                    printf "%s" "${item}"
                done
            sleep 0.75
            printf "\n"
        done
    )
}

# run `top` without showing any of its output after quitting it
tip() { tput smcup; top "$@"; tput rmcup; }

# change the tab-title on your terminal app
title() { printf "\e]0;%s\a\n" "$*"; }

# quick alias for my script `tjp`
alias tj=tjp

# quick alias for my script `tlp`
alias tl=tlp

# show the current date in a specific format
today() { date +'%Y-%m-%d %a %b %d'; }

# get the first n lines, or 1 by default
toline() { head -n "${1:-1}" "${2:--}"; }

# lowercase all ASCII symbols
tolower() { awk '{ print tolower($0); fflush() }' "$@"; }

# play a tone/sine-wave sound lasting the number of seconds given, or for 1
# second by default: after the optional duration, the next optional arguments
# are the volume and the tone-frequency; uses my script `waveout`
tone() {
    waveout "${1:-1}" "${2:-1} * sin(${3:-440} * 2 * pi * t)" |
        mpv --really-quiet -
}

# get the processes currently using the most cpu
topcpu() {
    local n="${1:-10}"
    [ "$n" -gt 0 ] && ps aux | awk '
        NR == 1 { print; fflush() }
        NR > 1 { print | "sort -rnk3" }
    ' | head -n "$(("$n" + 1))"
}
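
# Two commented-out sketches for `sumtsv` and `tally` above, using made-up
# data: `sumtsv` appends a final line with per-column sums (non-numeric
# columns simply add up to 0), while `tally` counts repeated lines.
#
# printf 'item\tqty\napples\t3\npears\t4\n' | sumtsv   # final line sums to 0, 7
# printf 'a\nb\na\na\n' | tally                        # a scores 3, b scores 1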

# show all files directly in the folder given, without looking any deeper
topfiles() {
    local arg
    for arg in "${@:-.}"; do
        if [ ! -d "${arg}" ]; then
            printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
            return 1
        fi
        stdbuf -oL find "${arg}" -maxdepth 1 -type f
    done
}

# show all folders directly in the folder given, without looking any deeper
topfolders() {
    local arg
    for arg in "${@:-.}"; do
        if [ ! -d "${arg}" ]; then
            printf "\e[31mno folder named %s\e[0m\n" "${arg}" > /dev/stderr
            return 1
        fi
        stdbuf -oL find "${arg}" -maxdepth 1 -type d |
            awk '!/^\.$/ { print; fflush() }'
    done
}

# get the processes currently using the most memory
topmemory() {
    local n="${1:-10}"
    [ "$n" -gt 0 ] && ps aux | awk '
        NR == 1 { print; fflush() }
        NR > 1 { print | "sort -rnk6" }
    ' | head -n "$(("$n" + 1))"
}

# transpose (switch) rows and columns from tables
transpose() {
    awk '
        { gsub(/\r$/, "") }

        NR == 1 && /\t/ { FS = "\t"; $0 = $0 }

        {
            for (i = 1; i <= NF; i++) lines[i][NR] = $i
            if (maxitems < NF) maxitems = NF
        }

        END {
            for (j = 1; j <= maxitems; j++) {
                for (i = 1; i <= NR; i++) {
                    if (i > 1) printf "\t"
                    printf "%s", lines[j][i]
                }
                printf "\n"
            }
        }
    ' "$@"
}

# ignore leading/trailing spaces, as well as trailing carriage returns
trim() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }

# TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
# decimal dots themselves, when decimals in a number are all zeros; works
# on gawk and busybox awk, but not on mawk, as the latter lacks `gensub`
# trimdecs() {
#     awk '
#         {
#             $0 = gensub(/([0-9]+)\.0+/, "\\1", "g")
#             $0 = gensub(/([0-9]+\.[0-9]*[1-9]+)0+/, "\\1", "g")
#             print; fflush()
#         }
#     ' "$@"
# }

# TRIM DECimalS ignores all trailing decimal zeros in numbers, even the
# decimal dots themselves, when decimals in a number are all zeros
trimdecs() {
    awk '{ print; fflush() }' "$@" |
        sed -u -E 's-([0-9]+)\.0+([^0-9]|$)-\1\2-g; s-([0-9]+\.[0-9]*[1-9]+)0+-\1-g'
}

# ignore trailing spaces, as well as trailing carriage returns
trimend() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }

# ignore trailing spaces, as well as trailing carriage returns
trimends() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }

# ignore leading/trailing spaces, as well as trailing carriage returns
trimlines() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }

# ignore leading/trailing spaces, as well as trailing carriage returns
trimsides() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; }

# ignore trailing spaces, as well as trailing carriage returns
trimtrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }

# ignore trailing spaces, as well as trailing carriage returns
trimtrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; }

# try running a command, emitting an explicit message to standard-error
# if the command given fails
try() {
    "$@" || {
        printf "\n\e[31m%s \e[41m\e[97m failed \e[0m\n" "$*" >&2
        return 255
    }
}

# Transform Strings with Python; uses my script `tbp`
tsp() { tbp -s "$@"; }
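
# Commented-out sketches for `transpose` and `trimdecs` above, with made-up
# values: `transpose` flips rows and columns, switching to tab-separated
# fields when the first line contains tabs, while `trimdecs` drops useless
# trailing decimal zeros.
#
# printf '1\t2\t3\n4\t5\t6\n' | transpose     # emits 1,4 then 2,5 then 3,6
# printf '7.000 2.500 0.125\n' | trimdecs     # emits 7 2.5 0.125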

# run the command given, trying to turn its output into TSV (tab-separated
# values); uses my script `dejson`
tsvrun() { jc "$@" | dejson; }

# deduplicate lines, keeping them in their original order
unique() { awk '!c[$0]++ { print; fflush() }' "$@"; }

# concatenate all named input sources unix-style: all trailing CRLFs become
# single LFs, and each non-empty input will always end in a LF, so lines from
# different sources are never accidentally joined; also, leading UTF-8 BOMs on
# the first line of each input are ignored, as those are useless at best
unixify() {
    awk '
        FNR == 1 { gsub(/^\xef\xbb\xbf/, "") }
        { gsub(/\r$/, ""); print; fflush() }
    ' "$@"
}

# go UP n folders, or go up 1 folder by default
up() {
    if [ "${1:-1}" -le 0 ]; then
        cd .
        return $?
    fi

    cd "$(printf "%${1:-1}s" "" | sed 's- -../-g')" || return $?
}

# convert United States Dollars into CAnadian Dollars, using the latest
# official exchange rates from the bank of canada; during weekends, the
# latest rate may be from a few days ago; the default amount of usd to
# convert is 1, when not given
usd2cad() {
    local site='https://www.bankofcanada.ca/valet/observations/group'
    local csv_rates="${site}/FX_RATES_DAILY/csv"
    local url
    url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')"
    curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" '
        /USD/ { for (i = 1; i <= NF; i++) if($i ~ /USD/) j = i }
        END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }'
}

# View with `less`
v() { less -JMKiCRS "$@"; }

# run a command, showing its success/failure right after
verdict() {
    local code
    "$@"
    code=$?

    if [ "${code}" -eq 0 ]; then
        printf "\n\e[38;2;0;135;95m%s \e[48;2;0;135;95m\e[38;2;255;255;255m succeeded \e[0m\n" "$*" >&2
    else
        printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" >&2
    fi
    return "${code}"
}
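
# `up` above builds its target path by turning n spaces into n `../` runs:
# `up 3` expands to `cd ../../../`, plain `up` goes up 1 folder, and any value
# less than 1 just stays in the current folder. A commented-out example:
#
# up 2 && pwd    # two folders up from where you were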

# run `cppcheck` with even stricter options
vetc() { cppcheck --enable=portability --enable=style "$@"; }

# run `cppcheck` with even stricter options
vetcpp() { cppcheck --enable=portability --enable=style "$@"; }

# check shell scripts for common gotchas, avoiding complaints about using
# the `local` keyword, which is widely supported in practice
vetshell() { shellcheck -e 3043 "$@"; }

# View with Header runs `less` without line numbers, with ANSI styles, no
# line-wraps, and using the first n lines as a sticky-header (1 by default),
# so they always show on top
vh() {
    local n="${1:-1}"
    [ $# -gt 0 ] && shift
    less --header="$n" -JMKiCRS "$@"
}

# View Nice Columns; uses my scripts `realign` and `nn`
vnc() { realign "$@" | nn --gray | less -JMKiCRS; }

# View Nice Hexadecimals; uses my script `nh`
vnh() { nh "$@" | less -JMKiCRS; }

# View Nice Json / Very Nice Json; uses my script `nj`
vnj() { nj "$@" | less -JMKiCRS; }

# View Very Nice Json with Nice Numbers; uses my scripts `nj` and `nn`
vnjnn() { nj "$@" | nn --gray | less -JMKiCRS; }

# View Nice Numbers; uses my script `nn`
vnn() { nn "${@:---gray}" | less -JMKiCRS; }

# View Nice Table / Very Nice Table; uses my scripts `nt` and `nn`
vnt() {
    awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" |
        nt | nn --gray |
        awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' |
        less -JMKiCRS #--header=1
}

# View Text with `less`
# vt() { less -JMKiCRS "$@"; }

# What are these (?); uses my command `nwat`
# alias w=nwat
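
# Commented-out examples for `vh` above, which needs a `less` version recent
# enough to support the `--header` option: the first line stays pinned while
# the rest of the output scrolls; the `*.txt` glob below is just made up.
#
# processes | vh      # browse all processes, keeping the ps header on top
# wctsv *.txt | vh    # browse word-count stats, keeping the header on top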

# What Are These (?) shows what the names given to it are/do
wat() {
    local a

    if [ $# -eq 0 ]; then
        printf "\e[31mwat: no names given\e[0m\n" > /dev/stderr
        return 1
    fi

    for a in "$@"; do
        # printf "\e[48;2;218;218;218m%-80s\e[0m\n" "$a"
        printf "\e[7m%-80s\e[0m\n" "$a"

        # resolve 1 alias level
        if alias "$a" 2> /dev/null > /dev/null; then
            a="$(alias "$a" | sed "s-.*=--; s-['\"]--g")"
        fi

        if echo "$a" | grep -E '[^ ]+ +[^ ]+' > /dev/null; then
            # resolved aliases with args/spaces in them would otherwise fail
            echo "$a"
        elif whence -f "$a" > /dev/null 2> /dev/null; then
            # zsh seems to show a shell function's code only via `whence -f`
            whence -f "$a"
        elif type "$a" > /dev/null 2> /dev/null; then
            # dash doesn't support `declare`, and `type` in bash emits
            # a redundant first output line, when it's a shell function
            type "$a" | awk '
                NR == 1 && /^[a-z0-9_-]+ is a function$/ { skipped = $0; next }
                { print; fflush() }
                END { if (NR < 2 && skipped) print skipped }
            '
        else
            printf "\e[31m%s not found\e[0m\n" "$a"
        fi
    done | less -JMKiCRS
}

# Word-Count TSV, runs the `wc` app using all stats, emitting tab-separated
# lines instead
wctsv() {
    printf "file\tbytes\tlines\tcharacters\twords\tlongest\n"
    stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \
        's-^ *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^\r]*)$-\6\t\4\t\1\t\3\t\2\t\5-' |
        awk '
            NR > 1 { print prev; fflush() }
            { prev = $0 }
            END { if (NR == 1 || !/^total\t/) print }
        '
}

# get weather forecasts, almost filling the terminal's current width
weather() {
    printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" |
        curl --show-error -s telnet://graph.no:79 |
        sed -u -E \
            -e 's/ *\r?$//' \
            -e '/^\[/d' \
            -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \
            -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \
            -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \
            -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \
            -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \
            -e 's/\*/○/g'
}

# recursively find all files with trailing spaces/CRs
wheretrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }

# recursively find all files with trailing spaces/CRs
whichtrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; }

# run `xargs`, using whole lines as extra arguments
x() { xargs -d '\n' "$@"; }

# run `xargs`, using zero/null bytes as the extra-arguments terminator
x0() { xargs -0 "$@"; }

# run `xargs`, using whole lines as extra arguments
xl() { xargs -d '\n' "$@"; }

# Youtube Audio Player
yap() {
    local url
    # some youtube URIs end with extra playlist/tracker parameters
    url="$(echo "$1" | sed 's-&.*--')"
    mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)"
}

# show a calendar for the current year, or for the year given
year() {
    {
        # show the current date/time center-aligned
        printf "%22s\e[32m%s\e[0m \e[34m%s\e[0m\n\n" \
            "" "$(date +'%a %b %d')" "$(date +%T)"
        # show a whole-year calendar
        cal -y "$@"
    } | less -JMKiCRS
}

# show the current date in the YYYY-MM-DD format
ymd() { date +'%Y-%m-%d'; }
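
# A commented-out composition sketch tying a few of the helpers above
# together, assuming some shell scripts sit in the current folder: `wctsv`
# emits per-file stats as TSV, `sortsv -rnk2` sorts them by the bytes column
# while keeping the header first, and `vh` keeps that header pinned on top.
#
# wctsv *.sh | sortsv -rnk2 | vh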

# YouTube Url
ytu() {
    local url
    # some youtube URIs end with extra playlist/tracker parameters
    url="$(echo "$1" | sed 's-&.*--')"
    [ $# -gt 0 ] && shift
    yt-dlp "$@" --get-url "${url}"
}
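
# Commented-out sketches for `yap` and `ytu` above, with a placeholder
# video-id, and assuming both `yt-dlp` and `mpv` are installed: `yap` plays
# just the audio, while `ytu` resolves the direct media URL, passing any
# extra options through to `yt-dlp`.
#
# yap 'https://www.youtube.com/watch?v=VIDEOID'
# ytu 'https://www.youtube.com/watch?v=VIDEOID' -f bestaudio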