File: clam.sh 1 #!/bin/sh 2 3 # The MIT License (MIT) 4 # 5 # Copyright © 2020-2025 pacman64 6 # 7 # Permission is hereby granted, free of charge, to any person obtaining a copy 8 # of this software and associated documentation files (the “Software”), to deal 9 # in the Software without restriction, including without limitation the rights 10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 # copies of the Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice shall be included in 15 # all copies or substantial portions of the Software. 16 # 17 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 # SOFTWARE. 24 25 26 # clam 27 # 28 # Command-Line Augmentation Module (clam): get the best out of your shell 29 # 30 # 31 # This is a collection of arguably useful shell functions and shortcuts: 32 # some of these extra commands can be real time/effort savers, ideally 33 # letting you concentrate on getting things done. 34 # 35 # Some of these commands depend on my other scripts from the `pac-tools`, 36 # others either rely on widely-preinstalled command-line apps, or ones 37 # which are available on most of the major command-line `package` managers. 38 # 39 # Among these commands, you'll notice a preference for lines whose items 40 # are tab-separated instead of space-separated, and unix-style lines, which 41 # always end with a line-feed, instead of a CRLF byte-pair. This convention 42 # makes plain-text data-streams less ambiguous and generally easier to work 43 # with, especially when passing them along pipes. 44 # 45 # To use this script, you're supposed to `source` it, so its definitions 46 # stay for your whole shell session: for that, you can run `source clam` or 47 # `. clam` (no quotes either way), either directly or at shell startup. 48 # 49 # This script is compatible with `bash`, `zsh`, and even `dash`, which is 50 # debian linux's default non-interactive shell. Some of its commands even 51 # seem to work on busybox's shell. 52 53 54 case "$1" in 55 -h|--h|-help|--help) 56 # show help message, using the info-comment from this very script 57 awk ' 58 /^case / { exit } 59 /^# +clam$/, /^$/ { gsub(/^# ?/, ""); print } 60 ' "$0" 61 exit 0 62 ;; 63 esac 64 65 66 # dash doesn't support regex-matching syntax, forcing to use case statements 67 case "$0" in 68 -bash|-dash|-sh|bash|dash|sh) 69 # script is being sourced with bash or dash, which is good 70 : 71 ;; 72 *) 73 case "$ZSH_EVAL_CONTEXT" in 74 *:file) 75 # script is being sourced with zsh, which is good 76 : 77 ;; 78 *) 79 # script is being run normally, which is a waste of time 80 printf "\e[48;2;255;255;135m\e[38;2;0;0;0mDon't run this script, source it instead: to do that,\e[0m\n" 81 printf "\e[48;2;255;255;135m\e[38;2;0;0;0mrun 'source clam' or '. clam' (no quotes either way).\e[0m\n" 82 # failing during shell-startup may deny shell access, so exit 83 # with a 0 error-code to declare success 84 exit 0 85 ;; 86 esac 87 ;; 88 esac 89 90 91 # n-column-layout shortcuts, using my script `bsbs` (Book-like Side By Side) 92 alias 1='bsbs 1' 93 alias 2='bsbs 2' 94 alias 3='bsbs 3' 95 alias 4='bsbs 4' 96 alias 5='bsbs 5' 97 alias 6='bsbs 6' 98 alias 7='bsbs 7' 99 alias 8='bsbs 8' 100 alias 9='bsbs 9' 101 alias 0='bsbs 10' 102 103 # find name from the local `apt` database of installable packages 104 aptfind() { 105 { 106 printf "\e[7m%-80s\e[0m\n\n" "$1" 107 # despite warnings, the `apt search` command has been around for years 108 # apt search "$1" 2>/dev/null | rg -A 1 "^$1" | sed -u 's/^--$//' 109 apt search "$1" 2>/dev/null | rg -A 1 "^[a-z0-9-]*$1" | 110 sed -u 's/^--$//' 111 } | less -JMKiCRS 112 } 113 114 # APT UPdate/grade 115 aptup() { sudo apt update && sudo apt upgrade; sudo -k; } 116 117 # emit each argument given as its own line of output 118 args() { awk 'BEGIN { for (i = 1; i < ARGC; i++) print ARGV[i]; exit }' "$@"; } 119 120 # turn UTF-8 into visible pseudo-ASCII, where variants of latin letters become 121 # their basic ASCII counterparts, and where non-ASCII symbols become question 122 # marks, one question mark for each code-point byte 123 asciify() { iconv -f utf-8 -t ascii//translit "$@"; } 124 125 # avoid/ignore lines which match any of the regexes given 126 avoid() { 127 awk ' 128 BEGIN { 129 for (i = 1; i < ARGC; i++) { 130 e[i] = ARGV[i] 131 delete ARGV[i] 132 } 133 } 134 135 { 136 for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next 137 print; fflush() 138 got++ 139 } 140 141 END { exit(got == 0) } 142 ' "${@:-^\r?$}" 143 } 144 145 # AWK Begin 146 # awkb() { awk "BEGIN { $1; exit }"; } 147 148 # AWK Begin 149 awkb() { awk "BEGIN { $1; exit }"; } 150 151 # emit a line with a repeating ball-like symbol in it 152 balls() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -●-g'; } 153 154 # show an ansi-styled BANNER-like line 155 # banner() { printf "\e[7m%s\e[0m\n" "$*"; } 156 157 # show an ansi-styled BANNER-like line 158 banner() { printf "\e[7m%-$(tput cols)s\e[0m\n" "$*"; } 159 160 # emit a colored bar which can help visually separate different outputs 161 bar() { 162 [ "${1:-80}" -gt 0 ] && 163 printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" 164 } 165 166 # process Blocks/paragraphs of non-empty lines with AWK 167 # bawk() { awk -F='' -v RS='' "$@"; } 168 169 # process Blocks/paragraphs of non-empty lines with AWK 170 bawk() { stdbuf -oL awk -F='' -v RS='' "$@"; } 171 172 # play a repeating and annoying high-pitched beep sound a few times a second, 173 # lasting the number of seconds given, or for 1 second by default; uses my 174 # script `waveout` 175 beeps() { 176 local f='sin(2_000 * tau * t) * (t % 0.5 < 0.0625)' 177 waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet - 178 } 179 180 # start by joining all arguments given as a tab-separated-items line of output, 181 # followed by all lines from stdin verbatim 182 begintsv() { 183 awk ' 184 BEGIN { 185 for (i = 1; i < ARGC; i++) { 186 if (i > 1) printf "\t" 187 printf "%s", ARGV[i] 188 delete ARGV[i] 189 } 190 if (ARGC > 1) printf "\n" 191 fflush() 192 } 193 { print; fflush() } 194 ' "$@" 195 } 196 197 # play a repeating synthetic-bell-like sound lasting the number of seconds 198 # given, or for 1 second by default; uses my tool `waveout` 199 bell() { 200 local f='sin(880*tau*u) * exp(-10*u)' 201 waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet - 202 } 203 204 # play a repeating sound with synthetic-bells, lasting the number of seconds 205 # given, or for 1 second by default; uses my tool `waveout` 206 bells() { 207 local f="sum(sin(880*tau*v)*exp(-10*v) for v in (u, (u-0.25)%1)) / 2" 208 waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet - 209 } 210 211 # Breathe Header: add an empty line after the first one (the header), then 212 # separate groups of 5 lines (by default) with empty lines between them 213 bh() { 214 local n="${1:-5}" 215 [ $# -gt 0 ] && shift 216 awk -v n="$n" ' 217 BEGIN { if (n == 0) n = -1 } 218 (NR - 1) % n == 1 && NR > 1 { print "" } 219 { print; fflush() } 220 ' "$@" 221 } 222 223 # recursively find all files with at least the number of bytes given; when 224 # not given a minimum byte-count, the default is 100 binary megabytes 225 bigfiles() { 226 local n 227 n="$(echo "${1:-104857600}" | sed -E 's-_--g; s-\.[0-9]+$--')" 228 [ $# -gt 0 ] && shift 229 230 local arg 231 for arg in "${@:-.}"; do 232 if [ ! -d "${arg}" ]; then 233 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 234 return 1 235 fi 236 stdbuf -oL find "${arg}" -type f -size "$n"c -o -size +"$n"c 237 done 238 } 239 240 # Breathe Lines: separate groups of 5 lines (by default) with empty lines 241 bl() { 242 local n="${1:-5}" 243 [ $# -gt 0 ] && shift 244 awk -v n="$n" ' 245 BEGIN { if (n == 0) n = -1 } 246 NR % n == 1 && NR != 1 { print "" } 247 { print; fflush() } 248 ' "$@" 249 } 250 251 # process BLocks/paragraphs of non-empty lines with AWK 252 # blawk() { awk -F='' -v RS='' "$@"; } 253 254 # process BLocks/paragraphs of non-empty lines with AWK 255 blawk() { stdbuf -oL awk -F='' -v RS='' "$@"; } 256 257 # emit a line with a repeating block-like symbol in it 258 blocks() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -█-g'; } 259 260 # Book-like MANual, lays out `man` docs as pairs of side-by-side pages; uses 261 # my script `bsbs` 262 bman() { 263 local w 264 w="$(tput cols)" 265 w="$((w / 2 - 4))" 266 if [ "$w" -lt 65 ]; then 267 w=65 268 fi 269 MANWIDTH="$w" man "$@" | bsbs 2 270 } 271 272 # Begin-Only Awk 273 # boa() { awk "BEGIN { $1; exit }"; } 274 275 # Begin-Only Awk 276 boa() { awk "BEGIN { $1; exit }"; } 277 278 # Begin-Only AWK 279 # boawk() { awk "BEGIN { $1; exit }"; } 280 281 # Begin-Only AWK 282 boawk() { awk "BEGIN { $1; exit }"; } 283 284 # BOOK-like MANual, lays out `man` docs as pairs of side-by-side pages; uses 285 # my script `bsbs` 286 bookman() { 287 local w 288 w="$(tput cols)" 289 w="$((w / 2 - 4))" 290 if [ "$w" -lt 65 ]; then 291 w=65 292 fi 293 MANWIDTH="$w" man "$@" | bsbs 2 294 } 295 296 # split lines using the regex given, turning them into single-item lines 297 breakdown() { 298 local sep="${1:- }" 299 [ $# -gt 0 ] && shift 300 awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@" 301 } 302 303 # separate groups of 5 lines (by default) with empty lines 304 breathe() { 305 local n="${1:-5}" 306 [ $# -gt 0 ] && shift 307 awk -v n="$n" ' 308 BEGIN { if (n == 0) n = -1 } 309 NR % n == 1 && NR != 1 { print "" } 310 { print; fflush() } 311 ' "$@" 312 } 313 314 # Browse Text 315 bt() { less -JMKNiCRS "$@"; } 316 317 # show a reverse-sorted tally of all lines read, where ties are sorted 318 # alphabetically, and where trailing bullets are added to quickly make 319 # the tally counts comparable at a glance 320 bully() { 321 awk -v sortcmd="sort -t \"$(printf '\t')\" -rnk2 -k1d" ' 322 # reassure users by instantly showing the header 323 BEGIN { print "value\ttally\tbullets"; fflush() } 324 325 { gsub(/\r$/, ""); tally[$0]++ } 326 327 END { 328 # find the max tally, which is needed to build the bullets-string 329 max = 0 330 for (k in tally) { 331 if (max < tally[k]) max = tally[k] 332 } 333 334 # make enough bullets for all tallies: this loop makes growing the 335 # string a task with complexity O(n * log n), instead of a naive 336 # O(n**2), which can slow-down things when tallies are high enough 337 bullet = "•" 338 bullets = bullet 339 for (n = max; n > 1; n /= 2) { 340 bullets = bullets bullets 341 } 342 343 # emit unsorted output lines to the sort cmd, which will emit the 344 # final reverse-sorted tally lines 345 for (k in tally) { 346 t = tally[k] 347 s = (t == 1) ? bullet : substr(bullets, 1, t) 348 printf "%s\t%d\t%s\n", k, t, s | sortcmd 349 } 350 } 351 ' "$@" 352 } 353 354 # play a busy-phone-line sound lasting the number of seconds given, or for 1 355 # second by default; uses my tool `waveout` 356 busy() { 357 # local f='(u < 0.5) * (sin(480*tau * t) + sin(620*tau * t)) / 2' 358 local f='min(1, exp(-90*(u-0.5))) * (sin(480*tau*t) + sin(620*tau*t)) / 2' 359 # local f='(sin(350*tau*t) + sin(450*tau*t)) / 2 * min(1, exp(-90*(u-0.5)))' 360 waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet - 361 } 362 363 # keep all BUT the FIRST (skip) n lines, or skip just the 1st line by default 364 butfirst() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; } 365 366 # keep all BUT the LAST n lines, or skip just the last line by default 367 butlast() { head -n -"${1:-1}" "${2:--}"; } 368 369 # load bytes from the filenames given 370 bytes() { cat "$@"; } 371 372 c() { cat "$@"; } 373 374 # CAlculator with Nice numbers runs my script `ca` and colors results with 375 # my script `nn`, alternating styles to make long numbers easier to read 376 can() { ca "$@" | nn --gray; } 377 378 # uppercase the first letter on each line, and lowercase all later letters 379 capitalize() { 380 awk '{ print; fflush() }' "$@" | sed -E 's-^(.*)-\L\1-; s-^(.)-\u\1-' 381 } 382 383 # conCATenate Lines guarantees no lines are ever accidentally joined 384 # across inputs, always emitting a line-feed at the end of every line 385 catl() { awk '{ print; fflush() }' "$@"; } 386 387 # Csv AWK: CSV-specific input settings for `awk` 388 # cawk() { awk --csv "$@"; } 389 390 # Csv AWK: CSV-specific input settings for `awk` 391 cawk() { stdbuf -oL awk --csv "$@"; } 392 393 # Compile C Stripped 394 ccs() { cc -Wall -O2 -s -fanalyzer "$@"; } 395 396 # center-align lines of text, using the current screen width 397 center() { 398 awk -v width="$(tput cols)" ' 399 { 400 gsub(/\r$/, "") 401 lines[NR] = $0 402 gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers 403 gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences 404 l = length 405 if (maxlen < l) maxlen = l 406 } 407 408 END { 409 n = (width - maxlen) / 2 410 if (n % 1) n = n - (n % 1) 411 fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0) 412 for (i = 1; i <= NR; i++) printf fmt, "", lines[i] 413 } 414 ' "$@" 415 } 416 417 # Colored Go Test on the folder given; uses my command `gbmawk` 418 cgt() { go test "${1:-.}" 2>&1 | gbmawk '/^ok/' '/^[-]* ?FAIL/' '/^\?/'; } 419 420 # ignore final life-feed from text, if it's the very last byte; also ignore 421 # all trailing carriage-returns 422 choplf() { 423 awk ' 424 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 425 NR > 1 { print ""; fflush() } 426 { gsub(/\r$/, ""); printf "%s", $0; fflush() } 427 ' "$@" 428 } 429 430 # Color Json using the `jq` app, allowing an optional filepath as the data 431 # source, and even an optional transformation formula 432 cj() { jq -C "${2:-.}" "${1:--}"; } 433 434 # clean the screen, after running the command given 435 clean() { 436 local res 437 if [ -p /dev/stdout ]; then 438 "$@" 439 return $? 440 fi 441 442 tput smcup 443 "$@" 444 res=$? 445 tput rmcup 446 return "${res}" 447 } 448 449 # show a live digital clock 450 clock() { watch -n 1 echo 'Press Ctrl + C to quit this clock'; } 451 452 # Colored Live/Line-buffered RipGrep ensures results show up immediately, 453 # also emitting colors when piped 454 clrg() { rg --color=always --line-buffered "$@"; } 455 456 # CLear Screen, like the old dos command of the same name 457 cls() { clear; } 458 459 # COunt COndition: count how many times the AWK expression given is true 460 coco() { 461 local cond="${1:-1}" 462 [ $# -gt 0 ] && shift 463 awk " 464 { low = lower = tolower(\$0) } 465 ${cond} { count++ } 466 END { print count } 467 " "$@" 468 } 469 470 # Colored RipGrep ensures app `rg` emits colors when piped 471 crg() { rg --color=always --line-buffered "$@"; } 472 473 # emit a line with a repeating cross-like symbol in it 474 crosses() { 475 [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -×-g' 476 } 477 478 # split lines using the string given, turning them into single-item lines 479 crumble() { 480 local sep="${1:- }" 481 [ $# -gt 0 ] && shift 482 awk -F "${sep}" '{ for (i = 1; i <= NF; i++) print $i; fflush() }' "$@" 483 } 484 485 # Clean Run clears the screen, after running the command given 486 cr() { 487 local res 488 if [ -p /dev/stdout ]; then 489 "$@" 490 return $? 491 fi 492 493 tput smcup 494 "$@" 495 res=$? 496 tput rmcup 497 return "${res}" 498 } 499 500 # turn Comma-Separated-Values tables into Tab-Separated-Values tables 501 csv2tsv() { xsv fmt -t '\t' "$@"; } 502 503 # Change Units turns common US units into international ones; uses my 504 # scripts `bu` (Better Units) and `nn` (Nice Numbers) 505 cu() { 506 bu "$@" | awk ' 507 NF == 5 || (NF == 4 && $NF == "s") { print $(NF-1), $NF } 508 NF == 4 && $NF != "s" { print $NF } 509 ' | nn --gray 510 } 511 512 # CURL Silent spares you the progress bar, but still tells you about errors 513 curls() { curl --show-error -s "$@"; } 514 515 # Count With AWK: count the times the AWK expression/condition given is true 516 cwawk() { 517 local cond="${1:-1}" 518 [ $# -gt 0 ] && shift 519 awk " 520 { low = lower = tolower(\$0) } 521 ${cond} { count++ } 522 END { print count } 523 " "$@" 524 } 525 526 # listen to streaming DANCE music 527 dance() { 528 printf "streaming \e[7mDance Wave Retro\e[0m\n" 529 # mpv --quiet https://retro.dancewave.online/retrodance.mp3 530 mpv --really-quiet https://retro.dancewave.online/retrodance.mp3 531 } 532 533 # emit a line with a repeating dash-like symbol in it 534 dashes() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -—-g'; } 535 536 # DEcode BASE64-encoded data, or even base64-encoded data-URIs, by ignoring 537 # the leading data-URI declaration, if present 538 debase64() { sed -E 's-^data:.{0,50};base64,--' "${1:--}" | base64 -d; } 539 540 # DECAPitate (lines) emits the first line as is, piping all lines after that 541 # to the command given, passing all/any arguments/options to it 542 # decap() { 543 # awk -v cmd="$*" 'NR == 1 { print; fflush() } NR > 1 { print | cmd }' 544 # } 545 546 # ignore whole-comment lines, or just trailing unix-style comments in them 547 decomment() { 548 awk '/^ *#/ { next } { gsub(/ *#.*$/, ""); print; fflush(); }' "$@" 549 } 550 551 # turn Comma-Separated-Values tables into tab-separated-values tables 552 # decsv() { xsv fmt -t '\t' "$@"; } 553 554 # DEDUPlicate prevents lines from appearing more than once 555 dedup() { awk '!c[$0]++ { print; fflush() }' "$@"; } 556 557 # dictionary-DEFine the word given, using an online service 558 def() { 559 local arg 560 local gap=0 561 for arg in "$@"; do 562 [ "${gap}" -gt 0 ] && printf "\n" 563 gap=1 564 printf "\e[7m%-80s\x1b[0m\n" "${arg}" 565 curl -s "dict://dict.org/d:${arg}" | awk ' 566 { gsub(/\r$/, "") } 567 /^151 / { 568 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush() 569 next 570 } 571 /^[1-9][0-9]{2} / { 572 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush() 573 next 574 } 575 { print; fflush() } 576 ' 577 done | less -JMKiCRS 578 } 579 580 # dictionary-define the word given, using an online service 581 define() { 582 local arg 583 local gap=0 584 for arg in "$@"; do 585 [ "${gap}" -gt 0 ] && printf "\n" 586 gap=1 587 printf "\e[7m%-80s\x1b[0m\n" "${arg}" 588 curl -s "dict://dict.org/d:${arg}" | awk ' 589 { gsub(/\r$/, "") } 590 /^151 / { 591 printf "\x1b[38;2;52;101;164m%s\x1b[0m\n", $0; fflush() 592 next 593 } 594 /^[1-9][0-9]{2} / { 595 printf "\x1b[38;2;128;128;128m%s\x1b[0m\n", $0; fflush() 596 next 597 } 598 { print; fflush() } 599 ' 600 done | less -JMKiCRS 601 } 602 603 # DEcompress GZip-encoded data 604 # degz() { zcat "$@"; } 605 606 # turn JSON Lines into a proper json array 607 dejsonl() { jq -s -M "${@:-.}"; } 608 609 # delay lines from the standard-input, waiting the number of seconds given 610 # for each line, or waiting 1 second by default 611 # delay() { 612 # local seconds="${1:-1}" 613 # ( 614 # IFS="$(printf "\n")" 615 # while read -r line; do 616 # sleep "${seconds}" 617 # printf "%s\n" "${line}" 618 # done 619 # ) 620 # } 621 622 # convert lines of Space(s)-Separated Values into lines of tab-separated values 623 dessv() { 624 awk ' 625 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 626 627 { 628 gsub(/\r$/, "") 629 for (i = 1; i <= NF; i++) { 630 if (i > 1) printf "\t" 631 printf "%s", $i 632 } 633 printf "\n"; fflush() 634 } 635 ' "$@" 636 } 637 638 # expand tabs each into up to the number of space given, or 4 by default 639 detab() { expand -t "${1:-4}"; } 640 641 # ignore trailing spaces, as well as trailing carriage returns 642 detrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; } 643 644 # turn UTF-16 data into UTF-8 645 deutf16() { iconv -f utf16 -t utf8 "$@"; } 646 647 # DICtionary-define the word given locally 648 dic() { 649 local arg 650 local gap=0 651 for arg in "$@"; do 652 [ "${gap}" -gt 0 ] && printf "\n" 653 gap=1 654 printf "\e[7m%-80s\x1b[0m\n" "${arg}" 655 dict "${arg}" 656 done | less -JMKiCRS 657 } 658 659 # DIVide 2 numbers 3 ways, including the complement 660 div() { 661 awk -v a="${1:-1}" -v b="${2:-1}" ' 662 BEGIN { 663 gsub(/_/, "", a) 664 gsub(/_/, "", b) 665 if (a > b) { c = a; a = b; b = c } 666 c = 1 - a / b 667 if (0 <= c && c <= 1) printf "%f\n%f\n%f\n", a / b, b / a, c 668 else printf "%f\n%f\n", a / b, b / a 669 exit 670 }' 671 } 672 673 # get/fetch data from the filename or URI given; named `dog` because dogs can 674 # `fetch` things for you 675 # dog() { 676 # if [ $# -gt 1 ]; then 677 # printf "\e[38;2;204;0;0mdogs only have 1 mouth to fetch with\e[0m\n" >&2 678 # return 1 679 # fi 680 # 681 # if [ -e "$1" ]; then 682 # cat "$1" 683 # return $? 684 # fi 685 # 686 # case "${1:--}" in 687 # -) cat -;; 688 # file://*|https://*|http://*) curl --show-error -s "$1";; 689 # ftp://*|ftps://*|sftp://*) curl --show-error -s "$1";; 690 # dict://*|telnet://*) curl --show-error -s "$1";; 691 # data:*) echo "$1" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;; 692 # *) curl --show-error -s "https://$1";; 693 # esac 2> /dev/null || { 694 # printf "\e[38;2;204;0;0mcan't fetch %s\e[0m\n" "${1:--}" >&2 695 # return 1 696 # } 697 # } 698 699 # emit a line with a repeating dot-like symbol in it 700 dots() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed 's- -·-g'; } 701 702 # ignore/remove all matched regexes given on all stdin lines 703 drop() { 704 awk ' 705 BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } } 706 { 707 for (i = 1; i < ARGC; i++) gsub(e[i], "") 708 print; fflush() 709 } 710 ' "${@:-\r$}" 711 } 712 713 # show the current Date and Time 714 dt() { 715 printf "\e[38;2;78;154;6m%s\e[0m \e[38;2;52;101;164m%s\e[0m\n" \ 716 "$(date +'%a %b %d')" "$(date +%T)" 717 } 718 719 # show the current Date, Time, and a Calendar with the 3 `current` months 720 dtc() { 721 { 722 # show the current date/time center-aligned 723 printf "%20s\e[38;2;78;154;6m%s\e[0m \e[38;2;52;101;164m%s\e[0m\n\n" \ 724 "" "$(date +'%a %b %d')" "$(date +%T)" 725 # debian linux has a different `cal` app which highlights the day 726 if [ -e "/usr/bin/ncal" ]; then 727 # fix debian/ncal's weird way to highlight the current day 728 ncal -C -3 | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g' 729 else 730 cal -3 731 fi 732 } | less -JMKiCRS 733 } 734 735 e() { echo "$@"; } 736 737 e4() { expand -t 4 "$@"; } 738 739 e8() { expand -t 8 "$@"; } 740 741 # Evaluate Awk expression 742 ea() { 743 local expr="${1:-0}" 744 [ $# -gt 0 ] && shift 745 awk "BEGIN { print ${expr}; exit }" "$@" 746 } 747 748 # EDit plain-text files 749 # ed() { tilde -b "$@"; } 750 751 # EDit plain-text files 752 ed() { micro "$@"; } 753 754 # edit plain-text files 755 # edit() { tilde -b "$@"; } 756 757 # edit plain-text files 758 edit() { micro "$@"; } 759 760 # EDit RUN shell commands, using an interactive editor 761 edrun() { . <( micro -readonly true -filetype shell | leak --inv ); } 762 763 # Extended-mode Grep, enabling its full regex syntax 764 eg() { grep -E --line-buffered "$@"; } 765 766 # Extended Grep, Recursive Interactive and Plain 767 # egrip() { ugrep -r -Q --color=never -E "$@"; } 768 769 # show all empty files in a folder, digging recursively 770 emptyfiles() { 771 local arg 772 for arg in "${@:-.}"; do 773 if [ ! -d "${arg}" ]; then 774 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 775 return 1 776 fi 777 stdbuf -oL find "${arg}" -type f -empty 778 done 779 } 780 781 # show all empty folders in a folder, digging recursively 782 emptyfolders() { 783 local arg 784 for arg in "${@:-.}"; do 785 if [ ! -d "${arg}" ]; then 786 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 787 return 1 788 fi 789 stdbuf -oL find "${arg}" -type d -empty 790 done 791 } 792 793 # Evaluate Nodejs expression 794 # en() { 795 # local expr="${1:-null}" 796 # expr="$(echo "${expr}" | sed 's-\\-\\\\-g; s-`-\`-g')" 797 # node -e "console.log(${expr})" | sed 's-\x1b\[[^A-Za-z]+[A-Za-z]--g' 798 # } 799 800 # Evaluate Python expression 801 ep() { python -c "print(${1:-None})"; } 802 803 # Extended Plain Interactive Grep 804 epig() { ugrep --color=never -Q -E "$@"; } 805 806 # Extended Plain Recursive Interactive Grep 807 eprig() { ugrep --color=never -Q -E "$@"; } 808 809 # Evaluate Ruby expression 810 # er() { ruby -e "puts ${1:-nil}"; } 811 812 # Edit Run shell commands, using an interactive editor 813 er() { . <( micro -readonly true -filetype shell | leak --inv ); } 814 815 # ignore/remove all matched regexes given on all stdin lines 816 erase() { 817 awk ' 818 BEGIN { for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } } 819 { 820 for (i = 1; i < ARGC; i++) gsub(e[i], "") 821 print; fflush() 822 } 823 ' "${@:-\r$}" 824 } 825 826 # Editor Read-Only 827 ero() { micro -readonly true "$@"; } 828 829 # Extended-mode Sed, enabling its full regex syntax 830 es() { sed -E -u "$@"; } 831 832 # Expand Tabs each into up to the number of space given, or 4 by default 833 et() { expand -t "${1:-4}"; } 834 835 # convert EURos into CAnadian Dollars, using the latest official exchange 836 # rates from the bank of canada; during weekends, the latest rate may be 837 # from a few days ago; the default amount of euros to convert is 1, when 838 # not given 839 eur2cad() { 840 local site='https://www.bankofcanada.ca/valet/observations/group' 841 local csv_rates="${site}/FX_RATES_DAILY/csv" 842 local url 843 url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')" 844 curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" ' 845 /EUR/ { for (i = 1; i <= NF; i++) if($i ~ /EUR/) j = i } 846 END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }' 847 } 848 849 # EValuate AWK expression 850 evawk() { 851 local expr="${1:-0}" 852 [ $# -gt 0 ] && shift 853 awk "BEGIN { print ${expr}; exit }" "$@" 854 } 855 856 # get various currency EXchange RATES 857 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/$1"; } 858 859 # get various currency EXchange RATES 860 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-USD}"; } 861 862 # get various currency EXchange RATES 863 # exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-EUR}"; } 864 865 # get various currency EXchange RATES 866 exrates() { curl -s "https://api.exchangerate-api.com/v4/latest/${1:-CAD}"; } 867 868 # convert fahrenheit into celsius 869 fahrenheit() { 870 echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | 871 awk '/./ { printf "%.2f\n", ($0 - 32) * 5.0/9.0 }' 872 } 873 874 # Flushed AWK 875 fawk() { stdbuf -oL awk "$@"; } 876 877 # fetch/web-request all URIs given, using protcol HTTPS when none is given 878 fetch() { 879 local a 880 for a in "$@"; do 881 case "$a" in 882 file://*|https://*|http://*) curl --show-error -s "$a";; 883 ftp://*|ftps://*|sftp://*) curl --show-error -s "$a";; 884 dict://*|telnet://*) curl --show-error -s "$a";; 885 data:*) echo "$a" | sed -E 's-^data:.{0,50};base64,--' | base64 -d;; 886 *) curl --show-error -s "https://$a";; 887 esac 888 done 889 } 890 891 # run the Fuzzy Finder (fzf) in multi-choice mode, with custom keybindings 892 ff() { fzf -m --bind ctrl-a:select-all,ctrl-space:toggle "$@"; } 893 894 # FInd FIles 895 fifi() { 896 local arg 897 local what="${1:-.}" 898 [ $# -gt 0 ] && shift 899 900 for arg in "${@:-.}"; do 901 if [ ! -d "${arg}" ]; then 902 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 903 return 1 904 fi 905 stdbuf -oL find "${arg}" -type f 906 done | awk -v what="${what}" ' 907 BEGIN { 908 m = "this variant of AWK lacks case-insensitive regex-matching" 909 if (IGNORECASE == "") { 910 printf("\x1b[38;2;204;0;0m%s\x1b[0m\n", m) > "/dev/stderr" 911 exit 125 912 } 913 IGNORECASE = 1 914 } 915 916 # BEGIN { what = tolower(what) } 917 # tolower($0) ~ what { got++; print; fflush() } 918 $0 ~ what { got++; print; fflush() } 919 END { exit(got == 0) } 920 ' 921 } 922 923 # show all files in a folder, digging recursively 924 files() { 925 local arg 926 for arg in "${@:-.}"; do 927 if [ ! -d "${arg}" ]; then 928 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 929 return 1 930 fi 931 stdbuf -oL find "${arg}" -type f 932 done 933 } 934 935 # recursively find all files with fewer bytes than the number given 936 filesunder() { 937 local n 938 n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')" 939 [ $# -gt 0 ] && shift 940 941 local arg 942 for arg in "${@:-.}"; do 943 if [ ! -d "${arg}" ]; then 944 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 945 return 1 946 fi 947 stdbuf -oL find "${arg}" -type f -size -"$n"c 948 done 949 } 950 951 # get the first n lines, or 1 by default 952 first() { head -n "${1:-1}" "${2:--}"; } 953 954 # limit data up to the first n bytes 955 firstbytes() { head -c "$1" "${2:--}"; } 956 957 # get the first n lines, or 1 by default 958 firstlines() { head -n "${1:-1}" "${2:--}"; } 959 960 # fix lines, ignoring leading UTF-8_BOMs (byte-order-marks) on each input's 961 # first line, turning all end-of-line CRLF byte-pairs into single line-feeds, 962 # and ensuring each input's last line ends with a line-feed; trailing spaces 963 # are also ignored 964 fixlines() { 965 awk ' 966 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 967 { gsub(/ *\r?$/, ""); print; fflush() } 968 ' "$@" 969 } 970 971 # FLushed AWK 972 # flawk() { stdbuf -oL awk "$@"; } 973 974 # First Line AWK, emits the first line as is, and uses the rest of the args 975 # given by injecting the first into the script, and passing all later args as 976 # later args to `awk` as given 977 flawk() { 978 local code="${1:-1}" 979 [ $# -gt 0 ] && shift 980 stdbuf -oL awk "NR == 1 { print; fflush(); next } ${code}" "$@" 981 } 982 983 # Faint LEAK emits/tees input both to stdout and stderr, coloring gray what 984 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes 985 # involving several steps 986 fleak() { 987 awk ' 988 { 989 gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") 990 printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0 > "/dev/stderr" 991 print; fflush() 992 } 993 ' "$@" 994 } 995 996 # try to run the command given using line-buffering for its (standard) output 997 flushlines() { stdbuf -oL "$@"; } 998 999 # show all folders in a folder, digging recursively 1000 folders() { 1001 local arg 1002 for arg in "${@:-.}"; do 1003 if [ ! -d "${arg}" ]; then 1004 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 1005 return 1 1006 fi 1007 stdbuf -oL find "${arg}" -type d | awk '!/^\.$/ { print; fflush() }' 1008 done 1009 } 1010 1011 # start from the line number given, skipping all previous ones 1012 fromline() { tail -n +"${1:-1}" "${2:--}"; } 1013 1014 # convert FeeT into meters 1015 ft() { 1016 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1017 awk '/./ { printf "%.2f\n", 0.3048 * $0; fflush() }' 1018 } 1019 1020 # convert FeeT² (squared) into meters² 1021 ft2() { 1022 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1023 awk '/./ { printf "%.2f\n", 0.09290304 * $0 }' 1024 } 1025 1026 # convert a mix of FeeT and INches into meters 1027 ftin() { 1028 local ft="${1:-0}" 1029 ft="$(echo "${ft}" | sed 's-_--g')" 1030 local in="${2:-0}" 1031 in="$(echo "${in}" | sed 's-_--g')" 1032 awk "BEGIN { print 0.3048 * ${ft} + 0.0254 * ${in}; exit }" 1033 } 1034 1035 # Get/fetch data from the filenames/URIs given; uses my tool `get` 1036 g() { get "$@"; } 1037 1038 # run `grep` in extended-regex mode, enabling its full regex syntax 1039 # g() { grep -E --line-buffered "$@"; } 1040 1041 # convert GALlons into liters 1042 gal() { 1043 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1044 awk '/./ { printf "%.2f\n", 3.785411784 * $0; fflush() }' 1045 } 1046 1047 # convert binary GigaBytes into bytes 1048 gb() { 1049 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1050 awk '/./ { printf "%.4f\n", 1073741824 * $0; fflush() }' | 1051 sed 's-\.00*$--' 1052 } 1053 1054 # glue/stick together various lines, only emitting a line-feed at the end; an 1055 # optional argument is the output-item-separator, which is empty by default 1056 glue() { 1057 local sep="${1:-}" 1058 [ $# -gt 0 ] && shift 1059 awk -v sep="${sep}" ' 1060 NR > 1 { printf "%s", sep } 1061 { gsub(/\r/, ""); printf "%s", $0; fflush() } 1062 END { if (NR > 0) print ""; fflush() } 1063 ' "$@" 1064 } 1065 1066 # GO Build Stripped: a common use-case for the go compiler 1067 gobs() { go build -ldflags "-s -w" -trimpath "$@"; } 1068 1069 # GO DEPendencieS: show all dependencies in a go project 1070 godeps() { go list -f '{{ join .Deps "\n" }}' "$@"; } 1071 1072 # GO IMPortS: show all imports in a go project 1073 goimps() { go list -f '{{ join .Imports "\n" }}' "$@"; } 1074 1075 # go to the folder picked using an interactive TUI; uses my tool `bf` 1076 goto() { 1077 local where 1078 where="$(bf "${1:-.}")" 1079 if [ $? -ne 0 ]; then 1080 return 0 1081 fi 1082 1083 where="$(realpath "${where}")" 1084 if [ ! -d "${where}" ]; then 1085 where="$(dirname "${where}")" 1086 fi 1087 cd "${where}" || return 1088 } 1089 1090 # GRayed-out lines with AWK 1091 grawk() { 1092 local cond="${1:-1}" 1093 [ $# -gt 0 ] && shift 1094 awk "${cond}"' { 1095 gsub(/\x1b\[0m/, "\x1b[0m\x1b[38;2;168;168;168m") 1096 printf "\x1b[38;2;168;168;168m%s\x1b[0m\n", $0; fflush() 1097 next 1098 } 1099 { print; fflush() } 1100 ' "$@" 1101 } 1102 1103 # Style lines using a GRAY-colored BACKground 1104 grayback() { 1105 awk ' 1106 { 1107 gsub(/\x1b\[0m/, "\x1b[0m\x1b[48;2;218;218;218m") 1108 printf "\x1b[48;2;218;218;218m%s\x1b[0m\n", $0; fflush() 1109 } 1110 ' "$@" 1111 } 1112 1113 # Grep, Recursive Interactive and Plain 1114 # grip() { ugrep -r -Q --color=never -E "$@"; } 1115 1116 # Global extended regex SUBstitute, using the AWK function of the same name: 1117 # arguments are used as regex/replacement pairs, in that order 1118 gsub() { 1119 awk ' 1120 BEGIN { 1121 for (i = 1; i < ARGC; i++) { 1122 args[++n] = ARGV[i] 1123 delete ARGV[i] 1124 } 1125 } 1126 { 1127 for (i = 1; i <= n; i += 2) gsub(args[i], args[i + 1]) 1128 print; fflush() 1129 } 1130 ' "$@" 1131 } 1132 1133 # show Help laid out on 2 side-by-side columns; uses my tool `bsbs` 1134 h2() { naman "$@" | bsbs 2; } 1135 1136 # Highlight (lines) with AWK 1137 hawk() { 1138 local cond="${1:-1}" 1139 [ $# -gt 0 ] && shift 1140 awk ' 1141 { low = lower = tolower($0) } 1142 '"${cond}"' { 1143 gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m") 1144 printf "\x1b[7m%s\x1b[0m\n", $0; fflush() 1145 next 1146 } 1147 { print; fflush() } 1148 ' "$@" 1149 } 1150 1151 # play a heartbeat-like sound lasting the number of seconds given, or for 1 1152 # second by default; uses my tool `waveout` 1153 heartbeat() { 1154 local a='sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1])' 1155 local b='((12, u), (8, (u-0.25)%1))' 1156 local f="sum($a for v in $b) / 2" 1157 # local f='sum(sin(10*tau*exp(-20*v))*exp(-2*v) for v in (u, (u-0.25)%1))/2' 1158 # local f='sum(sin(v[0]*tau*exp(-20*v[1]))*exp(-2*v[1]) for v in ((12, u), (8, (u-0.25)%1)))/2' 1159 waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet - 1160 } 1161 1162 # Highlighted-style ECHO 1163 hecho() { printf "\e[7m%s\e[0m\n" "$*"; } 1164 1165 # show each byte as a pair of HEXadecimal (base-16) symbols 1166 hexify() { 1167 cat "$@" | od -x -A n | 1168 awk '{ gsub(/ +/, ""); printf "%s", $0; fflush() } END { printf "\n" }' 1169 } 1170 1171 # Nice HyperFine, runs `hyperfine` with all colors/styles on 1172 nhf() { 1173 hyperfine --style full "$@" | sed -u 's-\x1b\[1;32m-\x1b\[38;2;0;135;95m-g' 1174 } 1175 1176 # HIghlighted-style ECHO 1177 hiecho() { printf "\e[7m%s\e[0m\n" "$*"; } 1178 1179 # highlight lines 1180 highlight() { 1181 awk ' 1182 { 1183 gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m") 1184 printf "\x1b[7m%s\x1b[0m\n", $0; fflush() 1185 } 1186 ' "$@" 1187 } 1188 1189 # HIghlight LEAK emits/tees input both to stdout and stderr, highlighting what 1190 # it emits to stderr using an ANSI-style; this cmd is useful to `debug` pipes 1191 # involving several steps 1192 hileak() { 1193 awk ' 1194 { 1195 gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") 1196 printf "\x1b[7m%s\x1b[0m\n", $0 > "/dev/stderr" 1197 print; fflush() 1198 } 1199 ' "$@" 1200 } 1201 1202 # highlight lines 1203 hilite() { 1204 awk ' 1205 { 1206 gsub(/\x1b\[0m/, "\x1b[0m\x1b[7m") 1207 printf "\x1b[7m%s\x1b[0m\n", $0; fflush() 1208 } 1209 ' "$@" 1210 } 1211 1212 # Help Me Remember my custom shell commands 1213 hmr() { 1214 local cmd="bat" 1215 # debian linux uses a different name for the `bat` app 1216 if [ -e "/usr/bin/batcat" ]; then 1217 cmd="batcat" 1218 fi 1219 1220 "$cmd" \ 1221 --style=plain,header,numbers --theme='Monokai Extended Light' \ 1222 --wrap=never --color=always "$(which clam)" | 1223 sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' | less -JMKiCRS 1224 } 1225 1226 # convert seconds into a colon-separated Hours-Minutes-Seconds triple 1227 hms() { 1228 echo "${@:-0}" | sed -E 's-_--g; s- +-\n-g' | awk '/./ { 1229 x = $0 1230 h = (x - x % 3600) / 3600 1231 m = (x % 3600) / 60 1232 s = x % 60 1233 printf "%02d:%02d:%05.2f\n", h, m, s; fflush() 1234 }' 1235 } 1236 1237 # find all hyperlinks inside HREF attributes in the input text 1238 href() { 1239 awk ' 1240 BEGIN { e = "href=\"[^\"]+\"" } 1241 { 1242 for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) { 1243 print substr(s, RSTART + 6, RLENGTH - 7); fflush() 1244 } 1245 } 1246 ' "$@" 1247 } 1248 1249 # Index all lines starting from 0, using a tab right after each line number 1250 # i() { 1251 # local start="${1:-0}" 1252 # [ $# -gt 0 ] && shift 1253 # stdbuf -oL nl -b a -w 1 -v "${start}" "$@" 1254 # } 1255 1256 # Index all lines starting from 0, using a tab right after each line number 1257 i() { stdbuf -oL nl -b a -w 1 -v 0 "$@"; } 1258 1259 # avoid/ignore lines which case-insensitively match any of the regexes given 1260 iavoid() { 1261 awk ' 1262 BEGIN { 1263 if (IGNORECASE == "") { 1264 m = "this variant of AWK lacks case-insensitive regex-matching" 1265 printf("\x1b[38;2;204;0;0m%s\x1b[0m\n", m) > "/dev/stderr" 1266 exit 125 1267 } 1268 IGNORECASE = 1 1269 1270 for (i = 1; i < ARGC; i++) { 1271 e[i] = ARGV[i] 1272 delete ARGV[i] 1273 } 1274 } 1275 1276 { 1277 for (i = 1; i < ARGC; i++) if ($0 ~ e[i]) next 1278 print; fflush(); got++ 1279 } 1280 1281 END { exit(got == 0) } 1282 ' "${@:-^\r?$}" 1283 } 1284 1285 # case-Insensitively DEDUPlicate prevents lines from appearing more than once 1286 idedup() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; } 1287 1288 # ignore/remove all case-insensitively matched regexes given on all stdin lines 1289 idrop() { 1290 awk ' 1291 BEGIN { 1292 if (IGNORECASE == "") { 1293 m = "this variant of AWK lacks case-insensitive regex-matching" 1294 printf("\x1b[38;2;204;0;0m%s\x1b[0m\n", m) > "/dev/stderr" 1295 exit 125 1296 } 1297 IGNORECASE = 1 1298 1299 for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } 1300 } 1301 1302 { 1303 for (i = 1; i < ARGC; i++) gsub(e[i], "") 1304 print; fflush() 1305 } 1306 ' "${@:-\r$}" 1307 } 1308 1309 # ignore/remove all case-insensitively matched regexes given on all stdin lines 1310 ierase() { 1311 awk ' 1312 BEGIN { 1313 if (IGNORECASE == "") { 1314 m = "this variant of AWK lacks case-insensitive regex-matching" 1315 printf("\x1b[38;2;204;0;0m%s\x1b[0m\n", m) > "/dev/stderr" 1316 exit 125 1317 } 1318 IGNORECASE = 1 1319 1320 for (i = 1; i < ARGC; i++) { e[i] = ARGV[i]; delete ARGV[i] } 1321 } 1322 1323 { 1324 for (i = 1; i < ARGC; i++) gsub(e[i], "") 1325 print; fflush() 1326 } 1327 ' "${@:-\r$}" 1328 } 1329 1330 # ignore command in a pipe: this allows quick re-editing of pipes, while 1331 # still leaving signs of previously-used steps, as a memo 1332 ignore() { cat; } 1333 1334 # only keep lines which case-insensitively match any of the regexes given 1335 imatch() { 1336 awk ' 1337 BEGIN { 1338 if (IGNORECASE == "") { 1339 m = "this variant of AWK lacks case-insensitive regex-matching" 1340 printf("\x1b[38;2;204;0;0m%s\x1b[0m\n", m) > "/dev/stderr" 1341 exit 125 1342 } 1343 IGNORECASE = 1 1344 1345 for (i = 1; i < ARGC; i++) { 1346 e[i] = ARGV[i] 1347 delete ARGV[i] 1348 } 1349 } 1350 1351 { 1352 for (i = 1; i < ARGC; i++) { 1353 if ($0 ~ e[i]) { 1354 print; fflush() 1355 got++ 1356 next 1357 } 1358 } 1359 } 1360 1361 END { exit(got == 0) } 1362 ' "${@:-[^\r]}" 1363 } 1364 1365 # start each non-empty line with extra n spaces 1366 indent() { 1367 awk ' 1368 BEGIN { 1369 n = ARGV[1] + 0 1370 delete ARGV[1] 1371 fmt = sprintf("%%%ds%%s\n", (n > 0) ? n : 0) 1372 } 1373 1374 /^\r?$/ { print ""; fflush(); next } 1375 { gsub(/\r$/, ""); printf(fmt, "", $0); fflush() } 1376 ' "$@" 1377 } 1378 1379 # listen to INTENSE streaming radio 1380 intense() { 1381 printf "streaming \e[7mIntense Radio\e[0m\n" 1382 mpv --quiet https://secure.live-streams.nl/flac.flac 1383 } 1384 1385 # show public-IP-related INFOrmation 1386 # ipinfo() { curl -s ipinfo.io; } 1387 1388 # show public-IP-related INFOrmation 1389 ipinfo() { curl -s ipinfo.io | jq; } 1390 1391 # Interactive Read-Only Top 1392 irot() { htop --readonly "$@"; } 1393 1394 # emit each word-like item from each input line on its own line; when a file 1395 # has tabs on its first line, items are split using tabs alone, which allows 1396 # items to have spaces in them 1397 items() { 1398 awk ' 1399 FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 } 1400 { gsub(/\r$/, ""); for (i = 1; i <= NF; i++) print $i; fflush() } 1401 ' "$@" 1402 } 1403 1404 # case-insensitively deduplicate lines, keeping them in their original order: 1405 # the checking/matching is case-insensitive, but each first match is output 1406 # exactly as is 1407 iunique() { awk '!c[tolower($0)]++ { print; fflush() }' "$@"; } 1408 1409 # shrink/compact Json data, allowing an optional filepath 1410 # j0() { python -m json.tool --compact "${1:--}"; } 1411 1412 # shrink/compact Json using the `jq` app, allowing an optional filepath, and 1413 # even an optional transformation formula after that 1414 # j0() { jq -c -M "${2:-.}" "${1:--}"; } 1415 1416 # show Json data on multiple lines, using 2 spaces for each indentation level, 1417 # allowing an optional filepath 1418 # j2() { python -m json.tool --indent 2 "${1:--}"; } 1419 1420 # show Json data on multiple lines, using 2 spaces for each indentation level, 1421 # allowing an optional filepath, and even an optional transformation formula 1422 # after that 1423 # j2() { jq --indent 2 -M "${2:-.}" "${1:--}"; } 1424 1425 # listen to streaming JAZZ music 1426 jazz() { 1427 printf "streaming \e[7mSmooth Jazz Instrumental\e[0m\n" 1428 # mpv https://stream.zeno.fm/00rt0rdm7k8uv 1429 mpv --quiet https://stream.zeno.fm/00rt0rdm7k8uv 1430 } 1431 1432 # show a `dad` JOKE from the web, sometimes even a very funny one 1433 # joke() { 1434 # curl -s https://icanhazdadjoke.com | fold -s | sed -E 's- *\r?$--' 1435 # # plain-text output from previous cmd doesn't end with a line-feed 1436 # printf "\n" 1437 # } 1438 1439 # show a `dad` JOKE from the web, sometimes even a very funny one 1440 joke() { 1441 curl --show-error -s https://icanhazdadjoke.com | fold -s | 1442 awk '{ gsub(/ *\r?$/, ""); print }' 1443 } 1444 1445 # shrink/compact JSON data, allowing an optional filepath 1446 # json0() { python -m json.tool --compact "${1:--}"; } 1447 1448 # shrink/compact JSON using the `jq` app, allowing an optional filepath, and 1449 # even an optional transformation formula after that 1450 json0() { jq -c -M "${2:-.}" "${1:--}"; } 1451 1452 # show JSON data on multiple lines, using 2 spaces for each indentation level, 1453 # allowing an optional filepath 1454 # json2() { python -m json.tool --indent 2 "${1:--}"; } 1455 1456 # show JSON data on multiple lines, using 2 spaces for each indentation level, 1457 # allowing an optional filepath, and even an optional transformation formula 1458 # after that 1459 json2() { jq --indent 2 -M "${2:-.}" "${1:--}"; } 1460 1461 # turn JSON Lines into a proper JSON array 1462 jsonl2json() { jq -s -M "${@:-.}"; } 1463 1464 # emit the given number of random/junk bytes, or 1024 junk bytes by default 1465 junk() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" /dev/urandom; } 1466 1467 # only keep the file-extension part from lines ending with file-extensions 1468 # justext() { 1469 # awk ' 1470 # !/^\./ && /\./ { gsub(/^.+\.+/, ""); printf ".%s\n", $0; fflush() } 1471 # ' "$@" 1472 # } 1473 1474 # only keep the file-extension part from lines ending with file-extensions 1475 justext() { 1476 awk ' 1477 !/^\./ && /\./ { 1478 if (match($0, /((\.[A-Za-z0-9]+)+) *\r?$/)) { 1479 print substr($0, RSTART, RLENGTH); fflush() 1480 } 1481 } 1482 ' "$@" 1483 } 1484 1485 # only keep lines ending with a file-extension of any popular picture format 1486 justpictures() { 1487 awk ' 1488 /.\.(bmp|gif|heic|ico|jfif|jpe?g|png|svg|tiff?|webp) *\r?$/ { 1489 gsub(/ *\r?$/, ""); print; fflush() 1490 } 1491 ' "$@" 1492 } 1493 1494 # only keep lines ending with a file-extension of any popular sound format 1495 justsounds() { 1496 awk ' 1497 /.\.(aac|aif[cf]?|au|flac|m4a|m4b|mp[23]|ogg|snd|wav|wma) *\r?$/ { 1498 gsub(/ *\r?$/, ""); print; fflush() 1499 } 1500 ' "$@" 1501 } 1502 1503 # only keep lines ending with a file-extension of any popular video format 1504 justvideos() { 1505 awk ' 1506 /.\.(avi|mkv|mov|mp4|mpe?g|ogv|webm|wmv) *\r?$/ { 1507 gsub(/ *\r?$/, ""); print; fflush() 1508 } 1509 ' "$@" 1510 } 1511 1512 # convert binary KiloBytes into bytes 1513 kb() { 1514 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1515 awk '/./ { printf "%.2f\n", 1024 * $0; fflush() }' | 1516 sed 's-\.00*$--' 1517 } 1518 1519 # run `less`, showing line numbers, among other settings 1520 l() { less -JMKNiCRS "$@"; } 1521 1522 # Like A Book groups lines as 2 side-by-side pages, the same way books 1523 # do it; uses my tool `book` 1524 lab() { book "$(($(tput lines) - 1))" "$@" | less -JMKiCRS; } 1525 1526 # LABEL/precede data with an ANSI-styled line 1527 label() { printf "\e[7m%-*s\e[0m\n" "$(($(tput cols) - 2))" "$*"; cat -; } 1528 1529 # find the LAN (local-area network) IP address for this device 1530 lanip() { hostname -I; } 1531 1532 # Line xARGS: `xargs` using line separators, which handles filepaths 1533 # with spaces, as long as the standard input has 1 path per line 1534 # largs() { tr -d '\r' | tr '\n' '\000' xargs -0 "$@"; } 1535 1536 # Line xARGS: `xargs` using line separators, which handles filepaths 1537 # with spaces, as long as the standard input has 1 path per line 1538 largs() { 1539 awk -v ORS='\000' ' 1540 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 1541 { gsub(/\r$/, ""); print; fflush() } 1542 ' | xargs -0 "$@" 1543 } 1544 1545 # get the last n lines, or 1 by default 1546 last() { tail -n "${1:-1}" "${2:--}"; } 1547 1548 # get up to the last given number of bytes 1549 lastbytes() { tail -c "${1:-1}" "${2:--}"; } 1550 1551 # get the last n lines, or 1 by default 1552 lastlines() { tail -n "${1:-1}" "${2:--}"; } 1553 1554 # turn UTF-8 into its latin-like subset, where variants of latin letters stay 1555 # as given, and where all other symbols become question marks, one question 1556 # mark for each code-point byte 1557 latinize() { 1558 iconv -f utf-8 -t latin-1//translit "$@" | iconv -f latin-1 -t utf-8 1559 } 1560 1561 # Lowercased (lines) AWK 1562 lawk() { 1563 local code="${1:-1}" 1564 [ $# -gt 0 ] && shift 1565 awk " 1566 { 1567 line = orig = original = \$0 1568 low = lower = tolower(\$0) 1569 \$0 = lower 1570 } 1571 ${code} 1572 { fflush() } 1573 " "$@"; 1574 } 1575 1576 # convert pounds (LB) into kilograms 1577 lb() { 1578 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1579 awk '/./ { printf "%.2f\n", 0.45359237 * $0; fflush() }' 1580 } 1581 1582 # convert a mix of pounds (LB) and weight-ounces (OZ) into kilograms 1583 lboz() { 1584 local lb="${1:-0}" 1585 lb="$(echo "${lb}" | sed 's-_--g')" 1586 local oz="${2:-0}" 1587 oz="$(echo "${oz}" | sed 's-_--g')" 1588 awk "BEGIN { print 0.45359237 * ${lb} + 0.028349523 * ${oz}; exit }" 1589 } 1590 1591 # turn the first n space-separated fields on each line into tab-separated 1592 # ones; this behavior is useful to make the output of many cmd-line tools 1593 # into TSV, since filenames are usually the last fields, and these may 1594 # contain spaces which aren't meant to be split into different fields 1595 leadtabs() { 1596 local n="$1" 1597 local cmd 1598 cmd="$([ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "")" 1599 cmd="s-^ *--; s- *\\r?\$--; $(echo "${cmd}" | sed 's/ /s- +-\\t-1;/g')" 1600 sed -u -E "${cmd}" 1601 } 1602 1603 # run `less`, showing line numbers, among other settings 1604 least() { less -JMKNiCRS "$@"; } 1605 1606 # limit stops at the first n bytes, or 1024 bytes by default 1607 limit() { head -c "$(echo "${1:-1024}" | sed 's-_--g')" "${2:--}"; } 1608 1609 # Less with Header runs `less` with line numbers, ANSI styles, no line-wraps, 1610 # and using the first n lines as a sticky-header (1 by default), so they 1611 # always show on top 1612 lh() { 1613 local n="${1:-1}" 1614 [ $# -gt 0 ] && shift 1615 less --header="$n" -JMKNiCRS "$@" 1616 } 1617 1618 lh2() { less --header=2 -JMKNiCRS "$@"; } 1619 1620 # ensure lines are never accidentally joined across files, by always emitting 1621 # a line-feed at the end of each line 1622 lines() { awk '{ print; fflush() }' "$@"; } 1623 1624 # regroup adjacent lines into n-item tab-separated lines 1625 lineup() { 1626 local n="${1:-0}" 1627 [ $# -gt 0 ] && shift 1628 1629 if [ "$n" -le 0 ]; then 1630 awk ' 1631 NR > 1 { printf "\t" } 1632 { printf "%s", $0; fflush() } 1633 END { if (NR > 0) print "" } 1634 ' "$@" 1635 return $? 1636 fi 1637 1638 awk -v n="$n" ' 1639 NR % n != 1 && n > 1 { printf "\t" } 1640 { printf "%s", $0; fflush() } 1641 NR % n == 0 { print ""; fflush() } 1642 END { if (NR % n != 0) print "" } 1643 ' "$@" 1644 } 1645 1646 # find all hyperLINKS (https:// and http://) in the input text 1647 links() { 1648 awk ' 1649 BEGIN { e = "https?://[A-Za-z0-9+_.:%-]+(/[A-Za-z0-9+_.%/,#?&=-]*)*" } 1650 { 1651 # match all links in the current line 1652 for (s = $0; match(s, e); s = substr(s, RSTART + RLENGTH)) { 1653 print substr(s, RSTART, RLENGTH); fflush() 1654 } 1655 } 1656 ' "$@" 1657 } 1658 1659 # List files, using the `Long` option 1660 # ll() { ls -l "$@"; } 1661 1662 # LOAD data from the filename or URI given; uses my tool `get` 1663 load() { get "$@"; } 1664 1665 # LOwercase line, check (awk) COndition: on each success, the original line 1666 # is output with its original letter-casing, as its lower-cased version is 1667 # only a convenience meant for the condition 1668 loco() { 1669 local cond="${1:-1}" 1670 [ $# -gt 0 ] && shift 1671 awk " 1672 { 1673 line = orig = original = \$0 1674 low = lower = tolower(\$0) 1675 \$0 = lower 1676 } 1677 ${cond} { print line; fflush() } 1678 " "$@" 1679 } 1680 1681 # LOcal SERver webserves files in a folder as localhost, using the port 1682 # number given, or port 8080 by default 1683 loser() { 1684 printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2 1685 python3 -m http.server "${1:-8080}" -d "${2:-.}" 1686 } 1687 1688 # LOWercase all ASCII symbols 1689 low() { awk '{ print tolower($0); fflush() }' "$@"; } 1690 1691 # LOWERcase all ASCII symbols 1692 lower() { awk '{ print tolower($0); fflush() }' "$@"; } 1693 1694 # Live/Line-buffered RipGrep ensures results show/pipe up immediately 1695 lrg() { rg --line-buffered "$@"; } 1696 1697 # Listen To Youtube 1698 lty() { 1699 local url 1700 # some youtube URIs end with extra playlist/tracker parameters 1701 url="$(echo "$1" | sed 's-&.*--')" 1702 mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)" 1703 } 1704 1705 # only keep lines which match any of the regexes given 1706 match() { 1707 awk ' 1708 BEGIN { 1709 for (i = 1; i < ARGC; i++) { 1710 e[i] = ARGV[i] 1711 delete ARGV[i] 1712 } 1713 } 1714 1715 { 1716 for (i = 1; i < ARGC; i++) { 1717 if ($0 ~ e[i]) { 1718 print; fflush() 1719 got++ 1720 next 1721 } 1722 } 1723 } 1724 1725 END { exit(got == 0) } 1726 ' "${@:-[^\r]}" 1727 } 1728 1729 # MAX Width truncates lines up to the given number of items/bytes given, or up 1730 # to 80 by default; output lines end with an ANSI reset-code, in case input 1731 # lines use ANSI styles 1732 maxw() { 1733 local maxwidth="${1:-80}" 1734 [ $# -gt 0 ] && shift 1735 awk -v maxw="${maxwidth}" ' 1736 { 1737 gsub(/\r$/, "") 1738 printf("%s\x1b[0m\n", substr($0, 1, maxw)); fflush() 1739 } 1740 ' "$@" 1741 } 1742 1743 # convert binary MegaBytes into bytes 1744 mb() { 1745 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1746 awk '/./ { printf "%.2f\n", 1048576 * $0; fflush() }' | 1747 sed 's-\.00*$--' 1748 } 1749 1750 # Multi-Core MAKE runs `make` using all cores 1751 mcmake() { make -j "$(nproc)" "$@"; } 1752 1753 # Multi-Core MaKe runs `make` using all cores 1754 mcmk() { make -j "$(nproc)" "$@"; } 1755 1756 # merge stderr into stdout, without any ugly keyboard-dancing 1757 # merrge() { "$@" 2>&1; } 1758 1759 # convert MIles into kilometers 1760 mi() { 1761 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1762 awk '/./ { printf "%.2f\n", 1.609344 * $0; fflush() }' 1763 } 1764 1765 # convert MIles² (squared) into kilometers² 1766 mi2() { 1767 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1768 awk '/./ { printf "%.2f\n", 2.5899881103360 * $0 }' 1769 } 1770 1771 # Make In Folder 1772 mif() { 1773 local code 1774 pushd "${1:-.}" > /dev/null || return 1775 [ $# -gt 0 ] && shift 1776 make "$@" 1777 code=$? 1778 popd > /dev/null || return "${code}" 1779 return "${code}" 1780 } 1781 1782 # Media INFO 1783 # minfo() { mediainfo "$@" | less -JMKiCRS; } 1784 1785 # Media INFO 1786 # minfo() { ffprobe "$@" |& less -JMKiCRS; } 1787 1788 # run `make` 1789 mk() { make "$@"; } 1790 1791 # convert Miles Per Hour into kilometers per hour 1792 mph() { 1793 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1794 awk '/./ { printf "%.2f\n", 1.609344 * $0 }' 1795 } 1796 1797 # Number all lines, using a tab right after each line number 1798 # n() { 1799 # local start="${1:-1}" 1800 # [ $# -gt 0 ] && shift 1801 # stdbuf -oL nl -b a -w 1 -v "${start}" "$@" 1802 # } 1803 1804 # Number all lines, using a tab right after each line number 1805 n() { stdbuf -oL nl -b a -w 1 -v 1 "$@"; } 1806 1807 # NArrow MANual, keeps `man` narrow, even if the window/tab is wide when run 1808 naman() { 1809 local w 1810 w="$(tput cols)" 1811 w="$((w / 2 - 4))" 1812 if [ "$w" -lt 80 ]; then 1813 w=80 1814 fi 1815 MANWIDTH="$w" man "$@" 1816 } 1817 1818 # Not AND sorts its 2 inputs, then finds lines not in common 1819 nand() { 1820 # comm -3 <(sort "$1") <(sort "$2") 1821 # dash doesn't support the process-sub syntax 1822 (sort "$1" | (sort "$2" | (comm -3 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0) 1823 } 1824 1825 # Nice Byte Count, using my scripts `nn` and `cext` 1826 nbc() { wc -c "$@" | nn --gray | cext; } 1827 1828 # listen to streaming NEW WAVE music 1829 newwave() { 1830 printf "streaming \e[7mNew Wave radio\e[0m\n" 1831 mpv --quiet https://puma.streemlion.com:2910/stream 1832 } 1833 1834 # NIce(r) COlumns makes the output of many commands whose output starts with 1835 # a header line easier to read; uses my tool `nn` 1836 nico() { 1837 awk ' 1838 (NR - 1) % 5 == 1 && NR > 1 { print "" } 1839 { printf "%5d %s\n", NR - 1, $0; fflush() } 1840 ' "$@" | nn --gray | less -JMKiCRS 1841 } 1842 1843 # emit nothing to output and/or discard everything from input 1844 nil() { 1845 if [ $# -gt 0 ]; then 1846 "$@" > /dev/null 1847 else 1848 cat < /dev/null 1849 fi 1850 } 1851 1852 # pipe-run my scripts `nj` (Nice Json) and `nn` (Nice Numbers) 1853 njnn() { nj "$@" | nn --gray; } 1854 1855 # Narrow MANual, keeps `man` narrow, even if the window/tab is wide when run 1856 nman() { 1857 local w 1858 w="$(tput cols)" 1859 w="$((w / 2 - 4))" 1860 if [ "$w" -lt 80 ]; then 1861 w=80 1862 fi 1863 MANWIDTH="$w" man "$@" 1864 } 1865 1866 # convert Nautical MIles into kilometers 1867 nmi() { 1868 echo "${@:-1}" | sed -E 's-_--g; s- +-\n-g' | 1869 awk '/./ { printf "%.2f\n", 1.852 * $0; fflush() }' 1870 } 1871 1872 # NO (standard) ERRor ignores stderr, without any ugly keyboard-dancing 1873 # noerr() { "$@" 2> /dev/null; } 1874 1875 # play a white-noise sound lasting the number of seconds given, or for 1 1876 # second by default; uses my tool `waveout` 1877 noise() { waveout "${1:-1}" "${2:-0.05} * random()" | mpv --really-quiet -; } 1878 1879 # ignore trailing spaces, as well as trailing carriage returns 1880 notrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; } 1881 1882 # show the current date and time 1883 now() { date +'%Y-%m-%d %H:%M:%S'; } 1884 1885 # Nice Print Python result; uses my tool `nn` 1886 npp() { python -c "print($1)" | nn "${2:---gray}"; } 1887 1888 # Nice Ps shows/lists all current processes shown by `ps`; uses my tool `nn` 1889 np() { 1890 local res 1891 local code 1892 # res="$(ps "${@:-auxf}")" 1893 res="$(ps "${@:-aux}")" 1894 code=$? 1895 if [ "${code}" -ne 0 ]; then 1896 return "${code}" 1897 fi 1898 1899 echo "${res}" | awk ' 1900 BEGIN { 1901 d = strftime("%a %b %d") 1902 t = strftime("%H:%M:%S") 1903 # printf "\x1b[7m%30s%s %s%30s\x1b[0m\n\n", "", d, t, "" 1904 fmt = "\x1b[38;2;128;128;128m\x1b[7m%30s%s %s%30s\x1b[0m\n\n" 1905 printf fmt, "", d, t, "" 1906 } 1907 1908 (NR - 1) % 5 == 1 && NR > 1 { print "" } 1909 1910 $1 == "root" { 1911 gsub(/^/, "\x1b[38;2;52;101;164m") 1912 gsub(/ +/, "&\x1b[0m\x1b[38;2;52;101;164m") 1913 gsub(/$/, "\x1b[0m") 1914 } 1915 1916 { 1917 gsub(/ \? /, "\x1b[38;2;135;135;175m&\x1b[0m") 1918 gsub(/0\.0/, "\x1b[38;2;135;135;175m&\x1b[0m") 1919 gsub(/0:00/, "\x1b[38;2;135;135;175m&\x1b[0m") 1920 printf "%3d %s\n", NR - 1, $0 1921 } 1922 ' | nn --gray | less -JMKiCRS 1923 } 1924 1925 # Nice Quick Calculation; uses my tool `nn` 1926 nqc() { calc -m 5 "${@:--h}" | sed 's-^\t--' | nn; } 1927 1928 # Nice Size, using my scripts `nn` and `cext` 1929 ns() { wc -c "$@" | nn --gray | cext; } 1930 1931 # Nice SystemCtl Status 1932 nscs() { 1933 systemctl status "$@" 2>&1 | sed 's-\x1b\[[^A-Za-z][A-Za-z]--g' | sed -E \ 1934 -e 's-(^[^ ] )([^ ]+\.service)-\1\x1b[7m\2\x1b[0m-' \ 1935 -e 's- (enabled)- \x1b[38;2;0;135;95m\x1b[7m\1\x1b[0m-g' \ 1936 -e 's- (disabled)- \x1b[38;2;215;95;0m\x1b[7m\1\x1b[0m-g' \ 1937 -e 's- (active \(running\))- \x1b[38;2;0;135;95m\x1b[7m\1\x1b[0m-g' \ 1938 -e 's- (inactive \(dead\))- \x1b[38;2;204;0;0m\x1b[7m\1\x1b[0m-g' \ 1939 -e 's-^(Unit .* could not .*)$-\x1b[38;2;204;0;0m\x1b[7m\1\x1b[0m\n-' \ 1940 -e 's-(\[WARN\].*)$-\x1b[38;2;215;95;0m\x1b[7m\1\x1b[0m\n-' \ 1941 -e 's-(\[ERR\].*)$-\x1b[38;2;204;0;0m\x1b[7m\1\x1b[0m\n-' | 1942 less -JMKiCRS 1943 } 1944 1945 # Nice Systemctl Status 1946 nss() { 1947 systemctl status "$@" 2>&1 | sed 's-\x1b\[[^A-Za-z][A-Za-z]--g' | sed -E \ 1948 -e 's-(^[^ ] )([^ ]+\.service)-\1\x1b[7m\2\x1b[0m-' \ 1949 -e 's- (enabled)- \x1b[38;2;0;135;95m\x1b[7m\1\x1b[0m-g' \ 1950 -e 's- (disabled)- \x1b[38;2;215;95;0m\x1b[7m\1\x1b[0m-g' \ 1951 -e 's- (active \(running\))- \x1b[38;2;0;135;95m\x1b[7m\1\x1b[0m-g' \ 1952 -e 's- (inactive \(dead\))- \x1b[38;2;204;0;0m\x1b[7m\1\x1b[0m-g' \ 1953 -e 's-^(Unit .* could not .*)$-\x1b[38;2;204;0;0m\x1b[7m\1\x1b[0m\n-' \ 1954 -e 's-(\[WARN\].*)$-\x1b[38;2;215;95;0m\x1b[7m\1\x1b[0m\n-' \ 1955 -e 's-(\[ERR\].*)$-\x1b[38;2;204;0;0m\x1b[7m\1\x1b[0m\n-' | 1956 less -JMKiCRS 1957 } 1958 1959 # Nice Transform Json, using my scripts `tj`, and `nj` 1960 ntj() { tj "$@" | nj; } 1961 1962 # Nice TimeStamp 1963 nts() { 1964 ts '%Y-%m-%d %H:%M:%S' | 1965 sed -u 's-^-\x1b[48;2;218;218;218m\x1b[38;2;0;95;153m-; s- -\x1b[0m\t-2' 1966 } 1967 1968 # emit nothing to output and/or discard everything from input 1969 null() { 1970 if [ $# -gt 0 ]; then 1971 "$@" > /dev/null 1972 else 1973 cat < /dev/null 1974 fi 1975 } 1976 1977 # NULl-terminate LINES ends each stdin line with a null byte, instead of a 1978 # line-feed byte 1979 nullines() { 1980 awk -v ORS='\000' ' 1981 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 1982 { gsub(/\r$/, ""); print; fflush() } 1983 ' "$@" 1984 } 1985 1986 # (Nice) What Are These (?) shows what the names given to it are/do, coloring 1987 # the syntax of shell functions 1988 nwat() { 1989 local arg 1990 local gap=0 1991 1992 if [ $# -eq 0 ]; then 1993 printf "\e[38;2;204;0;0mnwat: no names given\e[0m\n" >&2 1994 return 1 1995 fi 1996 1997 local cmd="bat" 1998 # debian linux uses a different name for the `bat` app 1999 if [ -e "/usr/bin/batcat" ]; then 2000 cmd="batcat" 2001 fi 2002 2003 for arg in "$@"; do 2004 [ "${gap}" -gt 0 ] && printf "\n" 2005 gap=1 2006 printf "\e[48;2;218;218;218m%-80s\e[0m\n" "${arg}" 2007 # printf "\e[7m%-80s\e[0m\n" "${arg}" 2008 2009 while alias "${arg}" > /dev/null 2> /dev/null; do 2010 arg="$(alias "${arg}" | sed -E "s-^[^=]+=['\"](.+)['\"]\$-\\1-")" 2011 done 2012 2013 if type "${arg}" > /dev/null 2> /dev/null; then 2014 type "${arg}" | awk 'NR == 1 && / is a function$/ { next } 1' | 2015 "$cmd" -l sh --style=plain --theme='Monokai Extended Light' \ 2016 --wrap=never --color=always | 2017 sed 's-\x1b\[38;5;70m-\x1b\[38;5;28m-g' 2018 else 2019 printf "\e[38;2;204;0;0m%s not found\e[0m\n" "${arg}" 2020 fi 2021 done | less -JMKiCRS 2022 } 2023 2024 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`, 2025 # alternating styles to make long numbers easier to read 2026 # nwc() { wc "$@" | nn --gray; } 2027 2028 # Nice numbers Word-Count runs `wc` and colors results with my script `nn`, 2029 # alternating styles to make long numbers easier to read 2030 # nwc() { wc "$@" | nn --gray | awk '{ printf "%5d %s\n", NR, $0; fflush() }'; } 2031 2032 # Nice Word-Count runs `wc` and colors results, using my scripts `nn` and 2033 # `cext`, alternating styles to make long numbers easier to read 2034 nwc() { 2035 wc "$@" | sort -rn | nn --gray | cext | 2036 awk '{ printf "%5d %s\n", NR - 1, $0; fflush() }' 2037 } 2038 2039 # Nice Weather Forecast 2040 nwf() { 2041 printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" | 2042 curl --show-error -s telnet://graph.no:79 | 2043 sed -E \ 2044 -e 's/ *\r?$//' \ 2045 -e '/^\[/d' \ 2046 -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \ 2047 -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \ 2048 -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \ 2049 -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \ 2050 -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \ 2051 -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \ 2052 -e 's/\*/○/g' | 2053 awk 1 | 2054 less -JMKiCRS 2055 } 2056 2057 # Nice Zoom Json, using my scripts `zj`, and `nj` 2058 nzj() { zj "$@" | nj; } 2059 2060 # Plain text; uses my tool `plain` 2061 p() { plain "$@"; } 2062 2063 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode 2064 # pawk() { awk -F='' -v RS='' "$@"; } 2065 2066 # Paragraph AWK runs `awk` in block/paragraph/multiline input-mode 2067 pawk() { stdbuf -oL awk -F='' -v RS='' "$@"; } 2068 2069 # Plain `fd` 2070 pfd() { fd --color=never "$@"; } 2071 2072 # Plain HyperFine, runs `hyperfine` with all colors/styles off 2073 phf() { hyperfine --style nocolor "$@"; } 2074 2075 # pick lines, using all the 1-based line-numbers given 2076 picklines() { 2077 awk ' 2078 BEGIN { m = ARGC - 1; if (ARGC == 1) exit 0 } 2079 BEGIN { for (i = 1; i <= m; i++) { p[i] = ARGV[i]; delete ARGV[i] } } 2080 { l[++n] = $0 } 2081 END { 2082 for (i = 1; i <= m; i++) { 2083 j = p[i] 2084 if (j < 0) j += NR + 1 2085 if (0 < j && j <= NR) print l[j] 2086 } 2087 } 2088 ' "$@" 2089 } 2090 2091 # Plain Interactive Grep 2092 pig() { ugrep --color=never -Q -E "$@"; } 2093 2094 # make text plain, by ignoring ANSI terminal styling 2095 plain() { 2096 awk ' 2097 { 2098 gsub(/\x1b\[[0-9;]*[A-Za-z]/, "") # ANSI style-changers 2099 # gsub(/\x1b\][^:]:|\a|\x1b\\/, "") # OSC sequences 2100 print; fflush() 2101 } 2102 ' "$@" 2103 } 2104 2105 # end all lines with an ANSI-code to reset styles 2106 plainend() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; } 2107 2108 # end all lines with an ANSI-code to reset styles 2109 plainends() { awk '{ printf "%s\x1b[0m\n", $0; fflush() }' "$@"; } 2110 2111 # play audio/video media 2112 # play() { mplayer -msglevel all=-1 "${@:--}"; } 2113 2114 # play audio/video media 2115 play() { mpv "${@:--}"; } 2116 2117 # Pick LINE, using the 1-based line-number given 2118 pline() { 2119 local line="$1" 2120 [ $# -gt 0 ] && shift 2121 awk -v n="${line}" ' 2122 BEGIN { if (n < 1) exit 0 } 2123 NR == n { print; exit 0 } 2124 ' "$@" 2125 } 2126 2127 # Paused MPV; especially useful when trying to view pictures via `mpv` 2128 pmpv() { mpv --pause "${@:--}"; } 2129 2130 # Print Python result 2131 pp() { python -c "print($1)"; } 2132 2133 # PRecede (input) ECHO, prepends a first line to stdin lines 2134 precho() { echo "$@" && cat /dev/stdin; } 2135 2136 # PREcede (input) MEMO, prepends a first highlighted line to stdin lines 2137 prememo() { 2138 awk ' 2139 BEGIN { 2140 if (ARGC > 1) printf "\x1b[7m" 2141 for (i = 1; i < ARGC; i++) { 2142 if (i > 1) printf " " 2143 printf "%s", ARGV[i] 2144 delete ARGV[i] 2145 } 2146 if (ARGC > 1) printf "\x1b[0m\n" 2147 fflush() 2148 } 2149 { print; fflush() } 2150 ' "$@" 2151 } 2152 2153 # start by joining all arguments given as a tab-separated-items line of output, 2154 # followed by all lines from stdin verbatim 2155 pretsv() { 2156 awk ' 2157 BEGIN { 2158 for (i = 1; i < ARGC; i++) { 2159 if (i > 1) printf "\t" 2160 printf "%s", ARGV[i] 2161 delete ARGV[i] 2162 } 2163 if (ARGC > 1) printf "\n" 2164 fflush() 2165 } 2166 { print; fflush() } 2167 ' "$@" 2168 } 2169 2170 # Plain Recursive Interactive Grep 2171 prig() { ugrep --color=never -r -Q -E "$@"; } 2172 2173 # show/list all current processes 2174 processes() { 2175 local res 2176 res="$(ps aux)" 2177 echo "${res}" | awk '!/ps aux$/' | sed -E \ 2178 -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' \ 2179 -e 's- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1; s- +-\t-1' 2180 } 2181 2182 # Play Youtube Audio 2183 pya() { 2184 local url 2185 # some youtube URIs end with extra playlist/tracker parameters 2186 url="$(echo "$1" | sed 's-&.*--')" 2187 mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)" 2188 } 2189 2190 # Quiet ignores stderr, without any ugly keyboard-dancing 2191 q() { "$@" 2> /dev/null; } 2192 2193 # Quick Calculation 2194 qc() { calc -m 5 "${@:--h}" | sed 's-^\t--'; } 2195 2196 # Quick Edit plain-text files 2197 qe() { tilde -b "$@"; } 2198 2199 # Quiet MPV 2200 qmpv() { mpv --quiet "${@:--}"; } 2201 2202 # ignore stderr, without any ugly keyboard-dancing 2203 quiet() { "$@" 2> /dev/null; } 2204 2205 # Reset the screen, which empties it and resets the current style 2206 r() { reset; } 2207 2208 # keep only lines between the 2 line numbers given, inclusively 2209 rangelines() { 2210 { [ "$#" -eq 2 ] || [ "$#" -eq 3 ]; } && [ "${1}" -le "${2}" ] && 2211 { tail -n +"${1:-1}" "${3:--}" | head -n "$(("${2}" - "${1}" + 1))"; } 2212 } 2213 2214 # RANdom MANual page 2215 ranman() { 2216 find "/usr/share/man/man${1:-1}" -type f | shuf -n 1 | xargs basename | 2217 sed 's-\.gz$--' | xargs man 2218 } 2219 2220 # Run AWK expression 2221 rawk() { 2222 local expr="${1:-0}" 2223 [ $# -gt 0 ] && shift 2224 awk "BEGIN { print ${expr}; exit }" "$@" 2225 } 2226 2227 # play a ready-phone-line sound lasting the number of seconds given, or for 1 2228 # second by default; uses my tool `waveout` 2229 ready() { 2230 local f='0.5 * sin(350*tau*t) + 0.5 * sin(450*tau*t)' 2231 waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet - 2232 } 2233 2234 # reflow/trim lines of prose (text) to improve its legibility: it's especially 2235 # useful when the text is pasted from web-pages being viewed in reader mode 2236 reprose() { 2237 local w="${1:-80}" 2238 [ $# -gt 0 ] && shift 2239 awk ' 2240 FNR == 1 && NR > 1 { print "" } 2241 { gsub(/\r$/, ""); print; fflush() } 2242 ' "$@" | fold -s -w "$w" | sed -u -E 's- *\r?$--' 2243 } 2244 2245 # ignore ansi styles from stdin and restyle things using the style-name given; 2246 # uses my script `style` 2247 restyle() { style "$@"; } 2248 2249 # change the tab-title on your terminal app 2250 retitle() { printf "\e]0;%s\a\n" "$*"; } 2251 2252 # REVerse-order SIZE (byte-count) 2253 revsize() { wc -c "$@" | sort -rn; } 2254 2255 # Run In Folder 2256 rif() { 2257 local code 2258 pushd "${1:-.}" > /dev/null || return 2259 [ $# -gt 0 ] && shift 2260 "$@" 2261 code=$? 2262 popd > /dev/null || return "${code}" 2263 return "${code}" 2264 } 2265 2266 # play a ringtone-style sound lasting the number of seconds given, or for 1 2267 # second by default; uses my tool `waveout` 2268 ringtone() { 2269 local f='sin(2048 * tau * t) * exp(-50 * (t%0.1))' 2270 waveout "${1:-1}" "${2:-1} * $f" | mpv --really-quiet - 2271 } 2272 2273 # Read-Only Editor 2274 roe() { micro -readonly true "$@"; } 2275 2276 # Read-Only Micro (text editor) 2277 rom() { micro -readonly true "$@"; } 2278 2279 # run the command given, trying to turn its output into TSV (tab-separated 2280 # values); uses my tool `dejson` 2281 rtab() { jc "$@" | dejson; } 2282 2283 # Right TRIM ignores trailing spaces, as well as trailing carriage returns 2284 rtrim() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; } 2285 2286 # show a RULER-like width-measuring line 2287 # ruler() { 2288 # local n="${1:-$(tput cols)}" 2289 # [ "${n}" -gt 0 ] && printf "%${n}s\n" "" | 2290 # sed -E 's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-' 2291 # } 2292 2293 # show a RULER-like width-measuring line 2294 ruler() { 2295 [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" "" | sed -E \ 2296 's- {10}-····╵····│-g; s- -·-g; s-·····-····╵-' 2297 } 2298 2299 # run the command given, trying to turn its output into TSV (tab-separated 2300 # values); uses my tool `dejson` 2301 runtab() { jc "$@" | dejson; } 2302 2303 # run the command given, trying to turn its output into TSV (tab-separated 2304 # values); uses my tool `dejson` 2305 runtsv() { jc "$@" | dejson; } 2306 2307 # Reverse-order WC 2308 rwc() { wc "$@" | sort -rn; } 2309 2310 # extended-mode Sed, enabling its full regex syntax 2311 # s() { sed -E -u "$@"; } 2312 2313 # Substitute using `sed`, enabling its full regex syntax 2314 # s() { sed -E -u "$(printf "s\xff%s\xff%s\xffg" "$1" "$2")"; } 2315 2316 # SystemCTL; `sysctl` is already taken for a separate/unrelated app 2317 sctl() { systemctl "$@" 2>&1 | less -JMKiCRS; } 2318 2319 # Silent CURL spares you the progress bar, but still tells you about errors 2320 scurl() { curl --show-error -s "$@"; } 2321 2322 # show a unique-looking SEParator line; useful to run between commands 2323 # which output walls of text 2324 sep() { 2325 [ "${1:-80}" -gt 0 ] && 2326 printf "\e[48;2;218;218;218m%${1:-80}s\e[0m\n" "" | sed 's- -·-g' 2327 } 2328 2329 # webSERVE files in a folder as localhost, using the port number given, or 2330 # port 8080 by default 2331 serve() { 2332 printf "\e[7mserving files in %s\e[0m\n" "${2:-$(pwd)}" >&2 2333 python3 -m http.server "${1:-8080}" -d "${2:-.}" 2334 } 2335 2336 # SET DIFFerence sorts its 2 inputs, then finds lines not in the 2nd input 2337 setdiff() { 2338 # comm -23 <(sort "$1") <(sort "$2") 2339 # dash doesn't support the process-sub syntax 2340 (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0) 2341 } 2342 2343 # SET INtersection, sorts its 2 inputs, then finds common lines 2344 setin() { 2345 # comm -12 <(sort "$1") <(sort "$2") 2346 # dash doesn't support the process-sub syntax 2347 (sort "$1" | (sort "$2" | (comm -12 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0) 2348 } 2349 2350 # SET SUBtraction sorts its 2 inputs, then finds lines not in the 2nd input 2351 setsub() { 2352 # comm -23 <(sort "$1") <(sort "$2") 2353 # dash doesn't support the process-sub syntax 2354 (sort "$1" | (sort "$2" | (comm -23 /dev/fd/3 /dev/fd/4) 4<&0) 3<&0) 2355 } 2356 2357 # Show Files (and folders), coloring folders and links; uses my tool `nn` 2358 sf() { 2359 local arg 2360 local gap=0 2361 2362 for arg in "${@:-.}"; do 2363 [ "${gap}" -gt 0 ] && printf "\n" 2364 printf "\e[7m%s\e[0m\n\n" "$(realpath "${arg}")" 2365 gap=1 2366 2367 ls -al --file-type --color=never --time-style iso "${arg}" | awk ' 2368 BEGIN { 2369 drep = "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m" 2370 lrep = "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m" 2371 } 2372 2373 (NR - 1) % 5 == 1 && NR > 1 { print "" } 2374 2375 { 2376 gsub(/^(d[rwx-]+)/, drep) 2377 gsub(/^(l[rwx-]+)/, lrep) 2378 printf "%6d %s\n", NR - 1, $0; fflush() 2379 } 2380 ' 2381 done | nn --gray | less -JMKiCRS 2382 } 2383 2384 # Show Files (and folders) Plus, by coloring folders, links, and extensions; 2385 # uses my scripts `nn` and `cext` 2386 sfp() { 2387 local arg 2388 local gap=0 2389 2390 for arg in "${@:-.}"; do 2391 [ "${gap}" -gt 0 ] && printf "\n" 2392 printf "\e[7m%s\e[0m\n\n" "$(realpath "${arg}")" 2393 gap=1 2394 2395 ls -al --file-type --color=never --time-style iso "${arg}" | awk ' 2396 BEGIN { 2397 drep = "\x1b[38;2;0;135;255m\x1b[48;2;228;228;228m&\x1b[0m" 2398 lrep = "\x1b[38;2;0;135;95m\x1b[48;2;228;228;228m&\x1b[0m" 2399 } 2400 2401 (NR - 1) % 5 == 1 && NR > 1 { print "" } 2402 2403 { 2404 gsub(/^(d[rwx-]+)/, drep) 2405 gsub(/^(l[rwx-]+)/, lrep) 2406 printf "%6d %s\n", NR - 1, $0; fflush() 2407 } 2408 ' 2409 done | nn --gray | cext | less -JMKiCRS 2410 } 2411 2412 # Show File Sizes, using my scripts `nn` and `cext` 2413 sfs() { 2414 # turn arg-list into single-item lines 2415 printf "%s\x00" "$@" | 2416 # calculate file-sizes, and reverse-sort results 2417 xargs -0 wc -c | sort -rn | 2418 # add/realign fields to improve legibility 2419 awk ' 2420 # start output with a header-like line, and add a MiB field 2421 BEGIN { printf "%6s %10s %8s name\n", "n", "bytes", "MiB"; fflush() } 2422 # make table breathe with empty lines, so tall outputs are readable 2423 (NR - 1) % 5 == 1 && NR > 1 { print "" } 2424 # emit regular output lines 2425 { 2426 printf "%6d %10d %8.2f ", NR - 1, $1, $1 / 1048576 2427 # first field is likely space-padded 2428 gsub(/^ */, "") 2429 # slice line after the first field, as filepaths can have spaces 2430 $0 = substr($0, length($1) + 1) 2431 # first field is likely space-padded 2432 gsub(/^ /, "") 2433 printf "%s\n", $0; fflush() 2434 } 2435 ' | 2436 # make zeros in the MiB field stand out with a special color 2437 awk ' 2438 { 2439 gsub(/ 00*\.00* /, "\x1b[38;2;135;135;175m&\x1b[0m") 2440 print; fflush() 2441 } 2442 ' | 2443 # make numbers nice, alternating styles along 3-digit groups 2444 nn --gray | 2445 # color-code file extensions 2446 cext | 2447 # make result interactively browsable 2448 less -JMKiCRS 2449 } 2450 2451 # SHell-run AWK output 2452 # shawk() { awk "$@" | sh; } 2453 2454 # SHell-run AWK output 2455 # shawk() { stdbuf -oL awk "$@" | sh; } 2456 2457 # time-run various tools given one-per-line from stdin, giving them extra 2458 # common arguments passed as explicit arguments 2459 showdown() { 2460 awk ' 2461 BEGIN { for (i = 1; i < ARGC; i++) { a[i] = ARGV[i]; delete ARGV[i] } } 2462 { 2463 printf "%s", $0 2464 for (i = 1; i < ARGC; i++) printf " %s", a[i] 2465 printf "\x00"; fflush() 2466 } 2467 ' "$@" | xargs -0 hyperfine --style full 2468 } 2469 2470 # SHOW a command, then RUN it 2471 showrun() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } 2472 2473 # SHell-QUOTE each line from the input(s): this is useful to make lines of 2474 # single-filepaths compatible with `xargs`, since standard shell settings 2475 # get in the way of filepaths with spaces and other special symbols in them 2476 shquote() { 2477 awk ' 2478 { 2479 s = $0 2480 gsub(/\r$/, "", s) 2481 gsub(/\\/, "\\\\", s) 2482 gsub(/"/, "\\\"", s) 2483 gsub(/`/, "\\`", s) 2484 gsub(/\$/, "\\$", s) 2485 printf "\"%s\"\n", s; fflush() 2486 } 2487 ' "$@" 2488 } 2489 2490 # clean the screen, after running the command given 2491 sideshow() { 2492 local res 2493 tput smcup 2494 "$@" 2495 res=$? 2496 tput rmcup 2497 return "${res}" 2498 } 2499 2500 # skip the first n lines, or the 1st line by default 2501 skip() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; } 2502 2503 # skip the first n bytes 2504 skipbytes() { tail -c +$(("$1" + 1)) "${2:--}"; } 2505 2506 # skip the last n lines, or the last line by default 2507 skiplast() { head -n -"${1:-1}" "${2:--}"; } 2508 2509 # skip the last n bytes 2510 skiplastbytes() { head -c -"$1" "${2:--}"; } 2511 2512 # skip the last n lines, or the last line by default 2513 skiplastlines() { head -n -"${1:-1}" "${2:--}"; } 2514 2515 # skip the first n lines, or the 1st line by default 2516 skiplines() { tail -n +$(("${1:-1}" + 1)) "${2:--}"; } 2517 2518 # SLOW/delay lines from the standard-input, waiting the number of seconds 2519 # given for each line, or waiting 1 second by default 2520 slow() { 2521 local seconds="${1:-1}" 2522 ( 2523 IFS="$(printf "\n")" 2524 while read -r line; do 2525 sleep "${seconds}" 2526 printf "%s\n" "${line}" 2527 done 2528 ) 2529 } 2530 2531 # Show Latest Podcasts, using my scripts `podfeed` and `si` 2532 slp() { 2533 local title 2534 title="Latest Podcast Episodes as of $(date +'%F %T')" 2535 podfeed -title "${title}" "$@" | si 2536 } 2537 2538 # recursively find all files with fewer bytes than the number given 2539 smallfiles() { 2540 local n 2541 n="$(echo "${1:-4097}" | sed -E 's-_--g; s-\.[0-9]+$--')" 2542 [ $# -gt 0 ] && shift 2543 2544 local arg 2545 for arg in "${@:-.}"; do 2546 if [ ! -d "${arg}" ]; then 2547 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 2548 return 1 2549 fi 2550 stdbuf -oL find "${arg}" -type f -size -"$n"c 2551 done 2552 } 2553 2554 # Stdbuf Output Line-buffered 2555 sol() { stdbuf -oL "$@"; } 2556 2557 # emit the first line as is, sorting all lines after that, using the 2558 # `sort` command, passing all/any arguments/options to it 2559 sortrest() { 2560 awk -v sort="sort $*" ' 2561 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 2562 { gsub(/\r$/, "") } 2563 NR == 1 { print; fflush() } 2564 NR > 1 { print | sort } 2565 ' 2566 } 2567 2568 # SORt Tab-Separated Values: emit the first line as is, sorting all lines after 2569 # that, using the `sort` command in TSV (tab-separated values) mode, passing 2570 # all/any arguments/options to it 2571 sortsv() { 2572 awk -v sort="sort -t \"$(printf '\t')\" $*" ' 2573 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 2574 { gsub(/\r$/, "") } 2575 NR == 1 { print; fflush() } 2576 NR > 1 { print | sort } 2577 ' 2578 } 2579 2580 # emit a line with the number of spaces given in it 2581 spaces() { [ "${1:-80}" -gt 0 ] && printf "%${1:-80}s\n" ""; } 2582 2583 # ignore leading spaces, trailing spaces, even runs of multiple spaces 2584 # in the middle of lines, as well as trailing carriage returns 2585 squeeze() { 2586 awk ' 2587 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 2588 { 2589 gsub(/^ +| *\r?$/, "") 2590 gsub(/ *\t */, "\t") 2591 gsub(/ +/, " ") 2592 print; fflush() 2593 } 2594 ' "$@" 2595 } 2596 2597 # SQUeeze and stOMP, by ignoring leading spaces, trailing spaces, even runs 2598 # of multiple spaces in the middle of lines, as well as trailing carriage 2599 # returns, while also turning runs of empty lines into single empty lines, 2600 # and ignoring leading/trailing empty lines, effectively also `squeezing` 2601 # lines vertically 2602 squomp() { 2603 awk ' 2604 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 2605 /^\r?$/ { empty = 1; next } 2606 empty { if (n > 0) print ""; empty = 0 } 2607 { 2608 gsub(/^ +| *\r?$/, "") 2609 gsub(/ *\t */, "\t") 2610 gsub(/ +/, " ") 2611 print; fflush() 2612 n++ 2613 } 2614 ' "$@" 2615 } 2616 2617 # Show a command, then Run it 2618 sr() { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } 2619 2620 # turn runs of empty lines into single empty lines, effectively squeezing 2621 # paragraphs vertically, so to speak; runs of empty lines both at the start 2622 # and at the end are ignored 2623 stomp() { 2624 awk ' 2625 /^\r?$/ { empty = 1; next } 2626 empty { if (n > 0) print ""; empty = 0 } 2627 { print; fflush(); n++ } 2628 ' "$@" 2629 } 2630 2631 # STRike-thru (lines) with AWK 2632 strawk() { 2633 local cond="${1:-1}" 2634 [ $# -gt 0 ] && shift 2635 awk ' 2636 { low = lower = tolower($0) } 2637 '"${cond}"' { 2638 gsub(/\x1b\[0m/, "\x1b[0m\x1b[9m") 2639 printf "\x1b[9m%s\x1b[0m\n", $0; fflush() 2640 next 2641 } 2642 { print; fflush() } 2643 ' "$@" 2644 } 2645 2646 # Sort Tab-Separated Values: emit the first line as is, sorting all lines after 2647 # that, using the `sort` command in TSV (tab-separated values) mode, passing 2648 # all/any arguments/options to it 2649 stsv() { 2650 awk -v sort="sort -t \"$(printf '\t')\" $*" ' 2651 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 2652 { gsub(/\r$/, "") } 2653 NR == 1 { print; fflush() } 2654 NR > 1 { print | sort } 2655 ' 2656 } 2657 2658 # use the result of the `awk` function `substr` for each line 2659 substr() { 2660 local start="${1:-1}" 2661 local length="${2:-80}" 2662 [ $# -gt 0 ] && shift 2663 [ $# -gt 0 ] && shift 2664 awk -v start="${start}" -v len="${length}" \ 2665 '{ printf "%s\n", substr($0, start, len); fflush() }' "$@" 2666 } 2667 2668 # turn SUDo privileges OFF right away: arguments also cause `sudo` to run with 2669 # what's given, before relinquishing existing privileges 2670 # sudoff() { 2671 # local code=0 2672 # if [ $# -gt 0 ]; then 2673 # sudo "$@" 2674 # code=$? 2675 # fi 2676 # sudo -k 2677 # return "${code}" 2678 # } 2679 2680 # append a final Tab-Separated-Values line with the sums of all columns from 2681 # the input table(s) given; items from first lines aren't counted/added 2682 sumtsv() { 2683 awk -F "\t" ' 2684 # FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 2685 2686 { 2687 gsub(/\r$/, "") 2688 print; fflush() 2689 if (width < NF) width = NF 2690 } 2691 2692 FNR > 1 { for (i = 1; i <= NF; i++) sums[i] += $i + 0 } 2693 2694 END { 2695 for (i = 1; i <= width; i++) { 2696 if (i > 1) printf "\t" 2697 printf "%s", sums[i] "" 2698 } 2699 if (width > 0) printf "\n" 2700 } 2701 ' "$@" 2702 } 2703 2704 # show a random command defined in `clam`, using `wat` from `clam` itself 2705 # surprise() { 2706 # local p="$(which clam)" 2707 # wat "$(grep -E '^[a-z]+\(' "$p" | shuf -n 1 | sed -E 's-\(.*--')" 2708 # } 2709 2710 # Time the command given 2711 # t() { /usr/bin/time "$@"; } 2712 2713 # Time the command given 2714 t() { time "$@"; } 2715 2716 # show a reverse-sorted tally of all lines read, where ties are sorted 2717 # alphabetically 2718 tally() { 2719 awk -v sortcmd="sort -t \"$(printf '\t')\" -rnk2 -k1d" ' 2720 # reassure users by instantly showing the header 2721 BEGIN { print "value\ttally"; fflush() } 2722 { gsub(/\r$/, ""); t[$0]++ } 2723 END { for (k in t) { printf("%s\t%d\n", k, t[k]) | sortcmd } } 2724 ' "$@" 2725 } 2726 2727 # Tab AWK: TSV-specific I/O settings for `awk` 2728 # tawk() { awk -F "\t" -v OFS="\t" "$@"; } 2729 2730 # Tab AWK: TSV-specific I/O settings for `awk` 2731 tawk() { stdbuf -oL awk -F "\t" -v OFS="\t" "$@"; } 2732 2733 # quick alias for my script `tbp` 2734 tb() { tbp "$@"; } 2735 2736 # Titled conCATenate Lines highlights each filename, before emitting its 2737 # lines 2738 tcatl() { 2739 awk ' 2740 FNR == 1 { printf "\x1b[7m%s\x1b[0m\n", FILENAME; fflush() } 2741 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 2742 { gsub(/\r$/, ""); print; fflush() } 2743 ' "$@" 2744 } 2745 2746 # Title ECHO changes the tab-title on your terminal app 2747 techo() { printf "\e]0;%s\a\n" "$*"; } 2748 2749 # simulate the cadence of old-fashioned teletype machines, by slowing down 2750 # the output of ASCII/UTF-8 symbols from the standard-input 2751 # teletype() { 2752 # awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | ( 2753 # IFS="$(printf "\n")" 2754 # while read -r line; do 2755 # echo "${line}" | sed -E 's-(.)-\1\n-g' | 2756 # while read -r item; do 2757 # sleep 0.015 2758 # printf "%s" "${item}" 2759 # done 2760 # sleep 0.75 2761 # printf "\n" 2762 # done 2763 # ) 2764 # } 2765 2766 # simulate the cadence of old-fashioned teletype machines, by slowing down 2767 # the output of ASCII/UTF-8 symbols from the standard-input 2768 teletype() { 2769 awk ' 2770 { 2771 gsub(/\r$/, "") 2772 2773 n = length($0) 2774 for (i = 1; i <= n; i++) { 2775 if (code = system("sleep 0.015")) exit code 2776 printf "%s", substr($0, i, 1); fflush() 2777 } 2778 if (code = system("sleep 0.75")) exit code 2779 printf "\n"; fflush() 2780 } 2781 2782 # END { if (NR > 0 && code != 0) printf "\n" } 2783 ' "$@" 2784 } 2785 2786 # run `top` without showing any of its output after quitting it 2787 tip() { tput smcup; top "$@"; tput rmcup; } 2788 2789 # change the tab-title on your terminal app 2790 title() { printf "\e]0;%s\a\n" "$*"; } 2791 2792 # quick alias for my script `tjp` 2793 tj() { tjp "$@"; } 2794 2795 # quick alias for my script `tlp` 2796 tl() { tlp "$@"; } 2797 2798 # show current date in a specifc format 2799 today() { date +'%Y-%m-%d %a %b %d'; } 2800 2801 # get the first n lines, or 1 by default 2802 toline() { head -n "${1:-1}" "${2:--}"; } 2803 2804 # lowercase all ASCII symbols 2805 tolower() { awk '{ print tolower($0); fflush() }' "$@"; } 2806 2807 # play a tone/sine-wave sound lasting the number of seconds given, or for 1 2808 # second by default: after the optional duration, the next optional arguments 2809 # are the volume and the tone-frequency; uses my tool `waveout` 2810 tone() { 2811 waveout "${1:-1}" "${2:-1} * sin(${3:-440} * 2 * pi * t)" | 2812 mpv --really-quiet - 2813 } 2814 2815 # get the processes currently using the most cpu 2816 topcpu() { 2817 local n="${1:-10}" 2818 [ "$n" -gt 0 ] && ps aux | awk ' 2819 NR == 1 { print; fflush() } 2820 NR > 1 { print | "sort -rnk3" } 2821 ' | head -n "$(("$n" + 1))" 2822 } 2823 2824 # show all files directly in the folder given, without looking any deeper 2825 topfiles() { 2826 local arg 2827 for arg in "${@:-.}"; do 2828 if [ ! -d "${arg}" ]; then 2829 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 2830 return 1 2831 fi 2832 stdbuf -oL find "${arg}" -maxdepth 1 -type f 2833 done 2834 } 2835 2836 # show all folders directly in the folder given, without looking any deeper 2837 topfolders() { 2838 local arg 2839 for arg in "${@:-.}"; do 2840 if [ ! -d "${arg}" ]; then 2841 printf "\e[38;2;204;0;0mno folder named %s\e[0m\n" "${arg}" >&2 2842 return 1 2843 fi 2844 stdbuf -oL find "${arg}" -maxdepth 1 -type d | 2845 awk '!/^\.$/ { print; fflush() }' 2846 done 2847 } 2848 2849 # get the processes currently using the most memory 2850 topmemory() { 2851 local n="${1:-10}" 2852 [ "$n" -gt 0 ] && ps aux | awk ' 2853 NR == 1 { print; fflush() } 2854 NR > 1 { print | "sort -rnk6" } 2855 ' | head -n "$(("$n" + 1))" 2856 } 2857 2858 # transpose (switch) rows and columns from tables 2859 transpose() { 2860 awk ' 2861 { gsub(/\r$/, "") } 2862 2863 FNR == 1 { FS = ($0 ~ /\t/) ? "\t" : " "; $0 = $0 } 2864 2865 { 2866 for (i = 1; i <= NF; i++) lines[i][NR] = $i 2867 if (maxitems < NF) maxitems = NF 2868 } 2869 2870 END { 2871 for (j = 1; j <= maxitems; j++) { 2872 for (i = 1; i <= NR; i++) { 2873 if (i > 1) printf "\t" 2874 printf "%s", lines[j][i] 2875 } 2876 printf "\n" 2877 } 2878 } 2879 ' "$@" 2880 } 2881 2882 # ignore leading/trailing spaces, as well as trailing carriage returns 2883 trim() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; } 2884 2885 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the 2886 # decimal dots themselves, when decimals in a number are all zeros; works 2887 # on gawk and busybox awk, but not on mawk, as the latter lacks `gensub` 2888 # trimdecs() { 2889 # awk ' 2890 # FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 2891 # { 2892 # gsub(/\r$/, "") 2893 # $0 = gensub(/([0-9]+)\.0+/, "\\1", "g") 2894 # $0 = gensub(/([0-9]+\.[0-9]*[1-9]+)0+/, "\\1", "g") 2895 # print; fflush() 2896 # } 2897 # ' "$@" 2898 # } 2899 2900 # TRIM DECimalS ignores all trailing decimal zeros in numbers, even the 2901 # decimal dots themselves, when decimals in a number are all zeros 2902 trimdecs() { 2903 awk '{ gsub(/\r$/, ""); print; fflush() }' "$@" | 2904 sed -u -E 's-([0-9]+)\.0+-\1-g; s-([0-9]+\.[0-9]*[1-9]+)0+-\1-g' 2905 } 2906 2907 # ignore trailing spaces, as well as trailing carriage returns 2908 trimend() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; } 2909 2910 # ignore trailing spaces, as well as trailing carriage returns 2911 trimends() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; } 2912 2913 # ignore leading/trailing spaces, as well as trailing carriage returns 2914 trimlines() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; } 2915 2916 # ignore leading/trailing spaces, as well as trailing carriage returns 2917 trimsides() { awk '{ gsub(/^ +| *\r?$/, ""); print; fflush() }' "$@"; } 2918 2919 # ignore trailing spaces, as well as trailing carriage returns 2920 trimtrail() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; } 2921 2922 # ignore trailing spaces, as well as trailing carriage returns 2923 trimtrails() { awk '{ gsub(/ *\r?$/, ""); print; fflush() }' "$@"; } 2924 2925 # try running a command, emitting an explicit message to standard-error 2926 # if the command given fails 2927 # try() { 2928 # "$@" || { 2929 # printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed\e[0m\n" "$*" >&2 2930 # return 255 2931 # } 2932 # } 2933 2934 # try running a command, emitting an explicit message to standard-error 2935 # if the command given fails 2936 try() { 2937 local code 2938 "$@" 2939 code=$? 2940 2941 if [ "${code}" -ne 0 ]; then 2942 printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" 2943 fi >&2 2944 return "${code}" 2945 } 2946 2947 # Transform Strings with Python; uses my tool `tbp` 2948 tsp() { tbp -s "$@"; } 2949 2950 # run the command given, trying to turn its output into TSV (tab-separated 2951 # values); uses my tool `dejson` 2952 tsvrun() { jc "$@" | dejson; } 2953 2954 # Truncate To Width limits lines to fit the window; uses my tool `tlp` 2955 ttw() { tlp line[:"$(tput cols)"] "$@"; } 2956 2957 # Time Verbosely the command given 2958 tv() { /usr/bin/time -v "$@"; } 2959 2960 # Underline (lines) with AWK 2961 uawk() { 2962 local cond="${1:-1}" 2963 [ $# -gt 0 ] && shift 2964 awk ' 2965 { low = lower = tolower($0) } 2966 '"${cond}"' { 2967 gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m") 2968 printf "\x1b[4m%s\x1b[0m\n", $0; fflush() 2969 next 2970 } 2971 { print; fflush() } 2972 ' "$@" 2973 } 2974 2975 # Underline Every few lines: make groups of 5 lines (by default) stand out by 2976 # underlining the last line of each 2977 ue() { 2978 local n="${1:-5}" 2979 [ $# -gt 0 ] && shift 2980 awk -v n="$n" ' 2981 BEGIN { if (n == 0) n = -1 } 2982 NR % n == 0 && NR != 1 { 2983 gsub(/\x1b\[0m/, "\x1b[0m\x1b[4m") 2984 printf("\x1b[4m%s\x1b[0m\n", $0); fflush() 2985 next 2986 } 2987 { print; fflush() } 2988 ' "$@" 2989 } 2990 2991 # deduplicate lines, keeping them in their original order 2992 unique() { awk '!c[$0]++ { print; fflush() }' "$@"; } 2993 2994 # make utf text into proper unix-style lines; uses my tool `utfate` 2995 unixify() { utfate "$@" | awk '{ gsub(/\r$/, ""); print; fflush() }'; } 2996 2997 # go UP n folders, or go up 1 folder by default 2998 up() { 2999 if [ "${1:-1}" -le 0 ]; then 3000 cd . 3001 return $? 3002 fi 3003 3004 cd "$(printf "%${1:-1}s" "" | sed 's- -../-g')" || return $? 3005 } 3006 3007 # convert United States Dollars into CAnadian Dollars, using the latest 3008 # official exchange rates from the bank of canada; during weekends, the 3009 # latest rate may be from a few days ago; the default amount of usd to 3010 # convert is 1, when not given 3011 usd2cad() { 3012 local site='https://www.bankofcanada.ca/valet/observations/group' 3013 local csv_rates="${site}/FX_RATES_DAILY/csv" 3014 local url 3015 url="${csv_rates}?start_date=$(date -d '3 days ago' +'%Y-%m-%d')" 3016 curl -s "${url}" | awk -F, -v amount="$(echo "${1:-1}" | sed 's-_--g')" ' 3017 /USD/ { for (i = 1; i <= NF; i++) if($i ~ /USD/) j = i } 3018 END { gsub(/"/, "", $j); if (j != 0) printf "%.2f\n", amount * $j }' 3019 } 3020 3021 # Unixify Text emits proper unix-style lines; uses my tool `utfate` 3022 ut() { utfate "$@" | awk '{ gsub(/\r$/, ""); print; fflush() }'; } 3023 3024 # View with `less` 3025 v() { less -JMKiCRS "$@"; } 3026 3027 # run a command, showing its success/failure right after 3028 verdict() { 3029 local code 3030 "$@" 3031 code=$? 3032 3033 if [ "${code}" -eq 0 ]; then 3034 printf "\n\e[38;2;0;135;95m%s \e[48;2;0;135;95m\e[38;2;255;255;255m succeeded \e[0m\n" "$*" 3035 else 3036 printf "\n\e[38;2;204;0;0m%s \e[48;2;204;0;0m\e[38;2;255;255;255m failed with error code %d \e[0m\n" "$*" "${code}" 3037 fi >&2 3038 return "${code}" 3039 } 3040 3041 # run `cppcheck` with even stricter options 3042 vetc() { 3043 cppcheck --enable=portability --enable=style --check-level=exhaustive "$@" 3044 } 3045 3046 # run `cppcheck` with even stricter options, also checking for c89 compliance 3047 vetc89() { 3048 cppcheck --enable=portability --enable=style \ 3049 --check-level=exhaustive --std=c89 "$@" 3050 } 3051 3052 # run `cppcheck` with even stricter options 3053 vetcpp() { 3054 cppcheck --enable=portability --enable=style --check-level=exhaustive "$@" 3055 } 3056 3057 # check shell scripts for common gotchas, avoiding complaints about using 3058 # the `local` keyword, which is widely supported in practice 3059 vetshell() { shellcheck -e 3043 "$@"; } 3060 3061 # View with Header runs `less` without line numbers, with ANSI styles, no 3062 # line-wraps, and using the first n lines as a sticky-header (1 by default), 3063 # so they always show on top 3064 vh() { 3065 local n="${1:-1}" 3066 [ $# -gt 0 ] && shift 3067 less --header="$n" -JMKiCRS "$@" 3068 } 3069 3070 vh2() { less --header=2 -JMKiCRS "$@"; } 3071 3072 # VIEW the result of showing a command, then RUNning it, using `less` 3073 viewrun() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; } 3074 3075 # View Nice Columns; uses my tools `realign` and `nn` 3076 vnc() { realign "$@" | nn --gray | less -JMKiCRS; } 3077 3078 # View Nice Hexadecimals; uses my tool `nh` 3079 vnh() { nh "$@" | less -JMKiCRS; } 3080 3081 # View Nice Json / Very Nice Json; uses my tools `nj` and `nn` 3082 vnj() { nj "$@" | less -JMKiCRS; } 3083 3084 # View Very Nice Json with Nice Numbers; uses my tools `nj` and `nn` 3085 vnjnn() { nj "$@" | nn --gray | less -JMKiCRS; } 3086 3087 # View Nice Numbers; uses my tool `nn` 3088 vnn() { nn "${@:---gray}" | less -JMKiCRS; } 3089 3090 # View Nice Table / Very Nice Table; uses my tools `nt` and `nn` 3091 vnt() { 3092 awk '{ gsub(/\r$/, ""); printf "%d\t%s\n", NR - 1, $0; fflush() }' "$@" | 3093 nt | nn --gray | 3094 awk '(NR - 1) % 5 == 1 && NR > 1 { print "" } { print; fflush() }' | 3095 less -JMKiCRS #--header=1 3096 } 3097 3098 # View-Run using `less`: show a command, then run it 3099 # vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less --header=1 -JMKiCRS; } 3100 3101 # View-Run using `less`: show a command, then run it 3102 vr() { { printf "\e[7m%s\e[0m\n" "$*"; "$@"; } | less -JMKiCRS; } 3103 3104 # View Text with `less` 3105 # vt() { less -JMKiCRS "$@"; } 3106 3107 # View Text with the `micro` text-editor in read-only mode 3108 vt() { micro -readonly true "$@"; } 3109 3110 # What are these (?); uses my command `nwat` 3111 # w() { nwat "$@"; } 3112 3113 # What Are These (?) shows what the names given to it are/do 3114 wat() { 3115 local arg 3116 local gap=0 3117 3118 if [ $# -eq 0 ]; then 3119 printf "\e[38;2;204;0;0mwat: no names given\e[0m\n" >&2 3120 return 1 3121 fi 3122 3123 for arg in "$@"; do 3124 [ "${gap}" -gt 0 ] && printf "\n" 3125 gap=1 3126 # printf "\e[48;2;218;218;218m%-80s\e[0m\n" "${arg}" 3127 printf "\e[7m%-80s\e[0m\n" "${arg}" 3128 3129 while alias "${arg}" > /dev/null 2> /dev/null; do 3130 arg="$(alias "${arg}" | sed -E "s-^[^=]+=['\"](.+)['\"]\$-\\1-")" 3131 done 3132 3133 if type "${arg}" > /dev/null 2> /dev/null; then 3134 type "${arg}" | awk 'NR == 1 && / is a function$/ { next } 1' 3135 else 3136 printf "\e[38;2;204;0;0m%s not found\e[0m\n" "${arg}" 3137 fi 3138 done | less -JMKiCRS 3139 } 3140 3141 # Word-Count TSV, runs the `wc` app using all stats, emitting tab-separated 3142 # lines instead 3143 wctsv() { 3144 printf "file\tbytes\tlines\tcharacters\twords\tlongest\n" 3145 stdbuf -oL wc -cmlLw "${@:--}" | sed -E -u \ 3146 's-^ *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^ ]*) *([^\r]*)$-\6\t\4\t\1\t\3\t\2\t\5-' | 3147 awk ' 3148 NR > 1 { print prev; fflush() } 3149 { prev = $0 } 3150 END { if (NR == 1 || !/^total\t/) print } 3151 ' 3152 } 3153 3154 # get weather forecasts, almost filling the terminal's current width 3155 # weather() { 3156 # printf "%s~%s\r\n\r\n" "$*" "$(($(tput cols) - 2))" | 3157 # curl --show-error -s telnet://graph.no:79 | 3158 # sed -E \ 3159 # -e 's/ *\r?$//' \ 3160 # -e '/^\[/d' \ 3161 # -e 's/^ *-= *([^=]+) +=- *$/\1\n/' \ 3162 # -e 's/-/\x1b[38;2;196;160;0m●\x1b[0m/g' \ 3163 # -e 's/^( +)\x1b\[38;2;196;160;0m●\x1b\[0m/\1-/g' \ 3164 # -e 's/\|/\x1b[38;2;52;101;164m█\x1b[0m/g' \ 3165 # -e 's/#/\x1b[38;2;218;218;218m█\x1b[0m/g' \ 3166 # -e 's/\^/\x1b[38;2;164;164;164m^\x1b[0m/g' \ 3167 # -e 's/\*/○/g' | 3168 # awk 1 | 3169 # less -JMKiCRS 3170 # } 3171 3172 # get weather forecasts; uses my tool `nwf` 3173 weather() { nwf "$@"; } 3174 3175 # Weather Forecast 3176 wf() { 3177 printf "%s\r\n\r\n" "$*" | curl --show-error -s telnet://graph.no:79 | 3178 awk '{ print; fflush() }' | less -JMKiCRS 3179 } 3180 3181 # WGet to standard output 3182 wg() { wget -O - "$@"; } 3183 3184 # WGET to standard Output 3185 wgeto() { wget -O - "$@"; } 3186 3187 # recursively find all files with trailing spaces/CRs 3188 wheretrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; } 3189 3190 # recursively find all files with trailing spaces/CRs 3191 whichtrails() { rg -c --line-buffered '[ \r]+$' "${@:-.}"; } 3192 3193 # turn all full linux/unix-style paths (which start from the filesystem root) 3194 # detected into WINdows-style PATHS 3195 winpaths() { 3196 awk '{ print; fflush() }' "$@" | 3197 sed -u -E 's-(/mnt/([A-Za-z])(/))-\u\2:/-g' 3198 } 3199 3200 # run `xargs`, using whole lines as extra arguments 3201 # x() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; } 3202 3203 # run `xargs`, using whole lines as extra arguments 3204 # x() { 3205 # awk -v ORS='\000' ' 3206 # FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 3207 # { gsub(/\r$/, ""); print; fflush() } 3208 # ' | xargs -0 "$@" 3209 # } 3210 3211 # run `xargs`, using zero/null bytes as the extra-arguments terminator 3212 x0() { xargs -0 "$@"; } 3213 3214 # run `xargs`, using whole lines as extra arguments 3215 # xl() { tr -d '\r' | tr '\n' '\000' | xargs -0 "$@"; } 3216 3217 # run `xargs`, using whole lines as extra arguments 3218 xl() { 3219 awk -v ORS='\000' ' 3220 FNR == 1 { gsub(/^\xef\xbb\xbf/, "") } 3221 { gsub(/\r$/, ""); print; fflush() } 3222 ' | xargs -0 "$@" 3223 } 3224 3225 # Youtube AAC audio 3226 yaac() { yt-dlp -f 140 "$@"; } 3227 3228 # Youtube MP4 video 3229 ymp4() { yt-dlp -f 22 "$@"; } 3230 3231 # Youtube Audio Player 3232 yap() { 3233 local url 3234 # some youtube URIs end with extra playlist/tracker parameters 3235 url="$(echo "$1" | sed 's-&.*--')" 3236 mpv "$(yt-dlp -x --audio-format aac --get-url "${url}" 2> /dev/null)" 3237 } 3238 3239 # Youtube Download AAC audio 3240 ydaac() { yt-dlp -f 140 "$@"; } 3241 3242 # Youtube Download MP4 video 3243 ydmp4() { yt-dlp -f 22 "$@"; } 3244 3245 # show a calendar for the current YEAR, or for the year given 3246 year() { 3247 { 3248 # show the current date/time center-aligned 3249 printf "%20s\e[38;2;78;154;6m%s\e[0m \e[38;2;52;101;164m%s\e[0m\n\n" \ 3250 "" "$(date +'%a %b %d %Y')" "$(date +%T)" 3251 # debian linux has a different `cal` app which highlights the day 3252 if [ -e "/usr/bin/ncal" ]; then 3253 # fix debian/ncal's weird way to highlight the current day 3254 ncal -C -y "$@" | sed -E 's/_\x08(.)/\x1b[7m\1\x1b[0m/g' 3255 else 3256 cal -y "$@" 3257 fi 3258 } | less -JMKiCRS 3259 } 3260 3261 # show the current date in the YYYY-MM-DD format 3262 ymd() { date +'%Y-%m-%d'; } 3263 3264 # YouTube DownLoad Plus 3265 ytdlp() { yt-dlp "$@"; } 3266 3267 # YouTube Url 3268 ytu() { 3269 local url 3270 # some youtube URIs end with extra playlist/tracker parameters 3271 url="$(echo "$1" | sed 's-&.*--')" 3272 [ $# -gt 0 ] && shift 3273 yt-dlp "$@" --get-url "${url}" 3274 } 3275 3276 # . <( 3277 # find "$(dirname $(which clam))" -type f -print0 | 3278 # xargs -0 -n 1 basename | 3279 # awk '{ print "unset " $0; print "unalias " $0 }' 3280 # ) 2> /dev/null