- cheat sheet cmd
curl cheat.sh/ls
- Security cheat sheet
### system wide for all users
# System-wide .bashrc file for interactive bash(1) shells.
/etc/bash.bashrc
# system-wide .profile file for the Bourne shell ( sh(1) )
/etc/profile
/etc/environment
/etc/profile.d/my_new_update.sh
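a minimal sketch of what such a profile.d script could contain ( variable names below are just examples ):
# /etc/profile.d/my_new_update.sh -- sourced by /etc/profile for every user at login
export EDITOR=vim
export PATH="$PATH:/opt/my-tools/bin"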
### during user login
~/.bash_profile
# executed by Bourne-compatible login shells.
~/.profile
# when opening a terminal; executed by bash for non-login shells.
~/.bashrc
ssh -D <localport> <user>@<remote host>
check that the SOCKS proxy works, e.g. for 'ssh -D 7772 [email protected]':
ssh -o "ProxyCommand nc -x 127.0.0.1:7772 %h %p" [email protected]
ssh -L <local_port>:<remote_host from ssh_host>:<remote_port> <username>@<ssh_host>
ssh -L <local_port>:<remote_host from ssh_host>:<remote_port> <ssh_host>
ssh -L 28010:vldn337:8010 localhost
ssh -L 28010:remote_host:8010 user_name@remote_host
# destination service on the same machine as ssh_host
# localport!=remote_port (28010!=8010)
ssh -L 28010:127.0.0.1:8010 user_name@remote_host
from local port 7000 to remote 5005
ssh -L 7000:127.0.0.1:5005 [email protected]
browser(ext_host) -> 134.190.2.5 -> 134.190.200.201
[email protected]:~$ ssh -L 134.190.2.5:8091:134.190.200.201:8091 [email protected]
user@ext_host:~$ wget 134.190.2.5:8091/echo
# ssh -R <remoteport>:<local host name>:<local port> <hostname>
# locally a service on port 9092 should be started
# and remotely you can reach it via 127.0.0.1:7777
ssh -R 7777:127.0.0.1:9092 localhost
//TODO
local=======>remote
after that, remote can use local as proxy
first of all start a local proxy (proxychains or redsocks)
sudo apt install privoxy
sudo vim /etc/privoxy/config
# listen-address 127.0.0.1:9999
# forward-socks5t / http://my-login:[email protected]:8080 .
# forward-socks4a / http://my-login:[email protected]:8080 .
# or
# forward / http://my-login:[email protected]:8080
systemctl start privoxy
# a local proxy server on port 9999 should be started
ssh -D 9999 127.0.0.1 -t ssh -R 7777:127.0.0.1:9999 [email protected]
# from remote machine you can execute
wget -e use_proxy=yes -e http_proxy=127.0.0.1:7777 https://google.com
ssh suppress banner, ssh no invitation
ssh -q my_server.org
ssh verbose, ssh log, debug ssh
ssh -vv my_server.org
app_1 --.
\
app_2 --- ---> local proxy <---> External Proxy <---> WWW
... /
app_n --'
install cntlm
# temporarily set proxy variables for curl and brew to work in this session
$ export http_proxy=http://<user>:<password>@proxy-url:proxy-port
$ export https_proxy=$http_proxy
# update & upgrade apt
$ sudo --preserve-env=http_proxy,https_proxy apt-get update
$ sudo --preserve-env=http_proxy,https_proxy apt-get upgrade
# finally, install cntlm
sudo --preserve-env=http_proxy,https_proxy apt-get install cntlm
edit configuration
vim ~/.config/cntlm/cntlm.conf
Username user-name
Domain domain-name
Proxy proxy-url:proxy-port
NoProxy localhost, 127.0.0.*, 10.*, 192.168.*, *.zur
Listen 3128
or globally
sudo vim /etc/cntlm.conf
~/bin/proxy-start.sh
#!/bin/sh
pidfile=~/.config/cntlm/cntlm.pid
if [ -f $pidfile ]; then
kill "$(cat $pidfile)"
sleep 2
fi
cntlm -c ~/.config/cntlm/cntlm.conf -P $pidfile -I
source ~/bin/proxy-settings.sh
proxy_url="http://127.0.0.1:3128"
export http_proxy=$proxy_url
export https_proxy=$http_proxy
export HTTP_PROXY=$http_proxy
export HTTPS_PROXY=$http_proxy
export _JAVA_OPTIONS="-Dhttp.proxyHost=127.0.0.1 -Dhttp.proxyPort=3128 -Dhttps.proxyHost=127.0.0.1 -Dhttps.proxyPort=3128 -Dhttps.nonProxyHosts=localhost|*.ubsgroup.net|*.muc -Dhttp.nonProxyHosts=localhost|*.ubsgroup.net|*.zur"
check status
sudo invoke-rc.d cntlm status
ss -lt | grep 3128
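quick check that the local cntlm proxy answers ( assuming it listens on 127.0.0.1:3128 as configured above ):
curl --proxy http://127.0.0.1:3128 -I https://google.com
# or, after sourcing proxy-settings.sh, just:
curl -I https://google.com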
# open access
ping -s 120 -c 1 146.255.193.66
ping -s 121 -c 1 146.255.193.66
ping -s 122 -c 1 146.255.193.66
# close access
ping -s 123 -c 1 146.255.193.66
open ports, open connections, listening ports, application by port, application port, process port, pid port
# list of open files
sudo lsof -i -P -n | grep LISTEN
# list of open connections
sudo netstat -tulpan | grep LISTEN
# print pid of process that occupying 9999 port
sudo ss -tulpan 'sport = :9999'
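kill the process that occupies a port, a sketch assuming port 9999 as above:
# print only the pid
sudo lsof -t -i :9999
# terminate it
sudo kill $(sudo lsof -t -i :9999)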
# open input output
iotop
# <drive> <path>
sudo mount /dev/sdd /tin
sudo mkdir /mnt/vendor-cluster-prod
sudo sshfs -o allow_other,IdentityFile=~/.ssh/id_rsa [email protected]:/remote/path/folder /mnt/vendor-cluster-prod
# unmount the local mount point:
# sudo fusermount -u /mnt/vendor-cluster-prod
# sudo umount /mnt/vendor-cluster-prod
sudo apt install curlftpfs
sudo mkdir /mnt/samsung-note
curlftpfs testuser:[email protected]:2221 /mnt/samsung-note/
sudo apt install nfs-common
sudo apt install cifs-utils
sudo mkdir -p /mnt/windows-computer
USER_NAME='my-username'
USER_DOMAIN='ZUR'
USER_SERVER='//u015029.ubsbank.net/home$/x453337/'
sudo mount -t cifs -o auto,gid=$(id -g),uid=$(id -u),username=$USER_NAME,domain=$USER_DOMAIN,vers=2.1 $USER_SERVER /mnt/windows-computer
# error message: bad option; for several filesystems (e.g. nfs, cifs) you might need a /sbin/mount.<type> helper program.
sudo apt-get install nfs-common
sudo apt-get install cifs-utils
sudo mount /dev/sdd /media/tina-team
umount /dev/sdd
sudo mkdir /mnt/disks/k8s-local-storage1
sudo chmod 755 /mnt/disks/k8s-local-storage1
sudo ln -s /mnt/disks/k8s-local-storage1/nfs nfs1
ls -la /mnt/disks
ls -la /mnt
sudo blkid
sudo vim /etc/fstab
# add record
# UUID=42665716-1f89-44d4-881c-37b207aecb71 /mnt/disks/k8s-local-storage1 ext4 defaults 0 0
# refresh / reload fstab
sudo mount -av
ls /mnt/disks/k8s-local-storage1
option 2
sudo vim /etc/fstab
# add line
# /dev/disk/by-uuid/8765-4321 /media/usb-drive vfat defaults 0 0
# copy everything from ```mount```
# /dev/sdd5 on /media/user1/e91bd98f-7a13-43ef-9dce-60d3a2f15558 type ext4 (rw,nosuid,nodev,relatime,uhelper=udisks2)
# /dev/sda1 on /media/kali/usbdata type fuseblk (rw,nosuid,nodev,relatime,user_id=0,group_id=0,default_permissions,allow_other,blksize=4096,uhelper=udisks2)
# systemctl daemon-reload
sudo mount -av
mount remote drive via network
10.55.0.3:/mnt/disks/k8s-local-storage/nfs /mnt/nfs nfs rw,noauto,x-systemd.automount,x-systemd.device-timeout=10,timeo=14 0 0
blkid
lsblk
fdisk -l
sudo mkfs -t xfs /dev/xvdb
sudo mke2fs /dev/xvdb
gpg --keyserver keyserver.ubuntu.com --recv-keys 9032CAE4CBFA933A5A2145D5FF97C53F183C045D
gpg --import john-brooks.asc
gpg --verify ricochet-1.1.4-src.tar.bz2.asc
gpg --keyserver keyserver.ubuntu.com --recv-keys D09FB15F1A24768DDF1FA29CCFEEF31651B5FDE8
# generate new RSA keys, create RSA
ssh-keygen -t rsa
( check created file /home/{user}/.ssh/id_rsa )
# if you have copied it, check permissions
chmod 700 ~/.ssh
chmod 700 ~/.ssh/*
ssh-copy-id -p {port} {username}@{machine ip}
ssh-copy-id -i ~/.ssh/id_rsa.pub -o StrictHostKeyChecking=no [email protected]
# manual execution
cat ~/.ssh/id_rsa.pub | ssh [email protected] 'cat >> ~/.ssh/authorized_keys'
# outputs nothing when the ssh key already exists on the remote host, ssh check
ssh-copy-id [email protected] 2>/dev/null
login without typing password
sshpass -p my_password ssh [email protected]
automate entering the password for ssh-copy-id
./ssh-copy.expect my_user ubsad00015.vantage.org "my_passw"
#!/usr/bin/expect -f
set user [lindex $argv 0];
set host [lindex $argv 1];
set password [lindex $argv 2];
spawn ssh-copy-id $user@$host
expect "])?"
send "yes\n"
expect "password: "
send "$password\n"
expect eof
sometimes you also need to run the following
ssh-agent bash
ssh-add ~/.ssh/id_dsa   # or ~/.ssh/id_rsa
remove host key from known_hosts ( e.g. after the remote host was reinstalled )
ssh-keygen -f "/home/{user}/.ssh/known_hosts" -R "10.140.240.105"
copy ssh key to remote machine, but manually:
cat .ssh/id_rsa.pub | ssh -p {port} {username}@{ip} "cat >> ~/.ssh/authorized_keys"
chmod 700 ~/.ssh ;
chmod 600 ~/.ssh/authorized_keys
issue broken pipe ssh
vim ~/.ssh/config
Host *
ServerAliveInterval 30
ServerAliveCountMax 5
ssh -o StrictHostKeyChecking=no [email protected]
sshpass -p my_password ssh -o StrictHostKeyChecking=no [email protected]
# check ssh-copy-id, check fingerprint
ssh-keygen -F bmw000013.adv.org
# returns 0 ( and an info line ); returns 1 when the host is not known
$ ls ~/.ssh
-rw------- id_rsa
-rw------- id_rsa_bmw
-rw-r--r-- id_rsa_bmw.pub
-rw-r--r-- id_rsa.pub
$ cat ~/.ssh/config
IdentityFile ~/.ssh/id_rsa_bmw
IdentityFile ~/.ssh/id_rsa
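per-host settings can also be grouped in ~/.ssh/config ( the alias below is just an example ):
Host bmw
    HostName bmw000013.adv.org
    User my_user
    IdentityFile ~/.ssh/id_rsa_bmw
# afterwards simply: ssh bmw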
scp filename.txt [email protected]:~/temp/filename-from-local.txt
scp -r [email protected]:~/temp/filename-from-local.txt filename.txt
scp -pr /source/directory user@host:the/target/directory
the same for copying a folder locally
cp -var /path/to/folder /another/path/to/folder
cp -r --preserve=mode,ownership,timestamps /path/to/src /path/to/dest
cp -r --preserve=all /path/to/src /path/to/dest
# change owner recursively for current folder and subfolders
sudo chown -R $USER .
# print diff
diff -qr /tmp/first-folder/ /tmp/second-folder
# local sync
rsync -r /tmp/first-folder/ /tmp/second-folder
# -a archive, -v verbose, -u skip files that are newer on the receiver
rsync -avu /tmp/first-folder/ /tmp/second-folder
# sync remote folder to local ( copy FROM remote )
rsync -avz [email protected]:~/test-2020-02-28 /home/projects/temp/test-2020-02-28
# sync remote folder to local ( copy FROM remote ) with specific port
rsync -avz -e 'ssh -p 2233' [email protected]:~/test-2020-02-28 /home/projects/temp/test-2020-02-28
# sync local folder to remote ( copy TO remote )
rsync -avz /home/projects/temp/test-2020-02-28 [email protected]:~/test-2020-02-28
# sync local folder to remote ( copy TO remote ) include exclude
rsync -avz --include "*.txt" --exclude "*.bin" /home/projects/temp/test-2020-02-28 [email protected]:~/test-2020-02-28
function cluster-prod-generation-sync-to(){
if [[ $1 == "" ]]; then
return 1
fi
rsync -avz . $USER_GT_LOGIN@ubsdpd00013.vantage.org:~/$1
}
create directory on remote machine, create folder remotely, ssh execute command, ssh remote execution
ssh user@host "mkdir -p /target/path/"
each_node="bpde00013.ubsbank.org"
REMOTE_SCRIPT="/opt/app/1.sh"
REMOTE_OUTPUT_LOG="/var/log/1.output"
ssh $REMOTE_USER"@"$each_node "nohup $REMOTE_SCRIPT </dev/null > $REMOTE_OUTPUT_LOG 2>&1 &"
export DISPLAY=:0.0
xterm
sftp -P 2222 my_user@localhost << END_FILE_MARKER
ls
exit
END_FILE_MARKER
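the same commands can be kept in a batch file and passed with sftp -b, a sketch:
echo -e "ls\nexit" > /tmp/sftp-commands.txt
sftp -P 2222 -b /tmp/sftp-commands.txt my_user@localhost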
# map local /tmp folder to another path/drive
sudo mount -B /tmp /mapped_drive/path/to/tmp
sudo mount /dev/cdrom /mnt
mkdir -p /mnt/my-ram
mount -t tmpfs tmpfs /mnt/my-ram -o size=1024M
watch -n 60 'ls -la | grep archive'
ls *.txt | entr firefox
!!
!?flow
. goto-command.sh
pushd
popd
dirs
cd -
shutdown -r now
# simple sort
sort <filename>
# sort by column ( space delimiter )
sort -k 3 <filename>
# sort by column number, with delimiter, with digital value ( 01, 02....10,11 )
sort -g -k 11 -t "/" session.list
# sort with reverse order
sort -r <filename>
cat -n <filename>
split --bytes=1M /path/to/image/image.jpg /path/to/image/prefixForNewImagePieces
cat prefixFiles* > newimage.jpg
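quick integrity check after reassembling ( file names from the commands above ):
md5sum /path/to/image/image.jpg newimage.jpg
# both checksums must be identical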
split --lines=17000 big_text_file.txt
count occurrences of each line ( input must be sorted )
uniq -c
print one line for each duplicated group
uniq -d
print all duplicated lines
uniq -D
print only unique ( non-repeated ) lines
uniq -u
cut --delimiter "," --fields 2,3,4 test1.csv
cut --delimiter "," -f2,3,4 test1.csv
substring with a fixed number of chars: characters 1-15, and 1-15 plus 20-50
cut -c1-15
cut -c1-15,20-50
echo "text file" | grep "" > $random_script_filename
# read log
tail -f /var/log/syslog
# write to system log
echo "test" | /usr/bin/logger -t cronjob
# write log message to another system
logger --server 192.168.1.10 --tcp "This is just a simple log line"
/var/log/messages
sudo cat /etc/apt/sources.list*
add-apt-repository ppa:inkscape.dev/stable
additional repository files can be found in
/etc/apt/sources.list.d
search after adding
apt-cache search inkscape
update from one repo, single update
sudo apt-get update -o Dir::Etc::sourcelist="sources.list.d/cc-ros-mirror.list" -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0"
sudo rm /etc/apt/sources.list.d/inkscape.dev*
add space before command
~/.inputrc
"\e[A": history-search-backward
"\e[B": history-search-forward
set show-all-if-ambiguous on
set completion-ignore-case on
TAB: menu-complete
"\e[Z": menu-complete-backward
set show-all-if-unmodified on
set show-all-if-ambiguous on
# stop execution when non-zero exit
set -e
# stop execution when an error happens even inside a pipeline
set -eo pipefail
# stop when accessing an unset variable
set -u
# print each command before execution
set -x
# auto-export all variables defined after this point ( useful before 'source' )
set -a
source file-with-variables.env
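a typical strict-mode header for a script, a sketch combining the options above:
#!/usr/bin/env bash
set -euo pipefail
# report the line number of the failing command
trap 'echo "error at line $LINENO"' ERR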
ctrl+x+e
fc
working folder
pwd
pwdx <process id>
--extra-vars 'rpm_version=$(cat version.txt)'
--extra-vars 'rpm_version=`cat version.txt`'
original.sh $*
folder: /etc/rc1.d ( rc2.d ... )
contains links to ../init.d/<name of bash script>
should understand the following options: start, stop, restart
#! /bin/sh
# /etc/init.d/blah
#
# Some things that run always
touch /var/lock/blah
# Carry out specific functions when asked to by the system
case "$1" in
start)
echo "Starting script blah "
;;
stop)
echo "Stopping script blah"
;;
*)
echo "Usage: /etc/init.d/blah {start|stop}"
exit 1
;;
esac
exit 0
or custom service, service destination
sudo vim /etc/systemd/system/YOUR_SERVICE_NAME.service
[Unit]
Description=GIVE_YOUR_SERVICE_A_DESCRIPTION
Wants=network.target
After=syslog.target network-online.target
[Service]
Type=simple
ExecStart=YOUR_COMMAND_HERE
Restart=on-failure
RestartSec=10
KillMode=process
[Install]
WantedBy=multi-user.target
service with docker container, service dockerized app
[Unit]
Description=Python app
After=docker.service
Requires=docker.service
[Service]
TimeoutStartSec=5
Restart=always
ExecStartPre=-/usr/bin/docker stop app
ExecStartPre=-/usr/bin/docker rm app
ExecStart=/usr/bin/docker run \
--env-file /home/user/.env.app \
--name app \
--publish 5001:5001 \
appauth
ExecStop=/usr/bin/docker stop app
[Install]
WantedBy=multi-user.target
managing services
# alternative of chkconfig
# alternative of sysv-rc-conf
# list all services
systemctl --all
systemctl enable YOUR_SERVICE_NAME
systemctl start YOUR_SERVICE_NAME
systemctl status YOUR_SERVICE_NAME
systemctl daemon-reload
systemctl stop YOUR_SERVICE_NAME
reset X-server, re-start xserver, reset linux gui ubuntu only
Ctrl-Alt-F1
sudo init 3
sudo init 5
sudo pkill X
sudo service lightdm stop
sudo service lightdm force-reload
start
sudo startx
sudo service lightdm start
apt-get install xdotool
xdotool windowactivate $each_window
xdotool key --window $each_window Return alt+f e Down Down Return
ls -lR . | grep ^l
cat secrets | grep ".*Name.*Avvo.*"
grep -ir --exclude-dir=node_modules "getServerSideProps"
readlink -f {file}
readlink -f `dirname $0`
realpath {file}
or
python -c 'import os.path; print(os.path.realpath("symlinkName"))'
basename {file}
dirname {file}
nautilus "$(dirname -- "$PATH_TO_SVG_CONFLUENCE")"
ls -d <path to folder>/*
find $FOLDER -maxdepth 4 -mindepth 4 | xargs ls -lad
readlink 'path to symlink'
which "program-name"
# issue with permission ( usually on NFS or cluster )
# find: '/mnt/nfs/ml-training-mongodb-pvc/journal': Permission denied
#
# solution:
sudo docker run --volume /mnt/nfs:/nfs -it busybox /bin/sh
chmod -R +r /nfs/ml-training-mongodb-pvc/journal
locate {file name}
exclude paths from the locate database:
/etc/updatedb.conf
locate -ir "brand-reader*"
locate -b "brand-reader"
you need to update the file database: /var/lib/mlocate/mlocate.db
sudo updatedb
find . -name "prd-ticket-1508.txt" 2>&1 | grep -v "Permission denied"
find . -name "*.j2" -o -name "*.yaml"
find / -mmin 2
find . -exec md5sum {} \;
find . -name "*.json" | while read each_file; do cat "$each_file" > "${each_file}".txt; done
find ./my_dir -mtime +5 -type f -delete
# default variable, env var default
find ${IMAGE_UPLOAD_TEMP_STORAGE:-/tmp/image_upload} -mtime +1 -type f -delete
find /tmp -maxdepth 1 -name "native-platform*" -mmin +240 | xargs -I {} sudo rm -r {} >/dev/null 2>&1
find /tmp -maxdepth 1 -mmin +240 -iname "[0-9]*\-[0-9]" | xargs -I {} sudo rm -r {} >/dev/null 2>&1
find . -type f -size +50000k -exec ls -lh {} \;
find . -type f -size +50000k -exec ls -lh {} \; | awk '{ print $9 ": " $5 }'
find . -maxdepth 5 -mindepth 5
find /mapr/vantage/data/store/processed/*/*/*/*/*/Metadata/file_info.json
find . -type d -name "dist" ! -path "*/node_modules/*"
type <function name>
declare -f <function name>
cd()
{
# builtin executes the genuine command, not the overriding function
builtin cd /home/projects
}
folder size, dir size, directory size, size directory, size folder size of folder, size of directory
sudo du -shc ./*
sudo du -shc ./* | sort -rh | head -5
df -ha
df -hT /
# size of folder
du -sh /home
# size of sub-folders
du -mh /home
# print first 5 leaders of size-consumers
# slow way: du -a /home | sort -n -r | head -n 5
sudo du -shc ./* | sort -rh | head -5
du -ch /home
# find only files with biggest size ( top 5 )
find -type f -exec du -Sh {} + | sort -rh | head -n 5
yum list {pattern}
( example: yum list python33 )
yum install {package name}
yum repolist all
yum info {package name}
yumdb info {package name}
rpm -qa --last
rpm -qai
rpm -qaic
rpm -qi wd-tomcat8-app-brandserver
fg, bg, jobs
ctrl-Z
bg
ctrl-Z
fg
resume a process by its number in the 'jobs' list
fg 2
bash
exec > output-file.txt
date
# the same as 'exit'
exec <&-
cat output-file.txt
gen_version="5.2.1"
$(find /mapr/dp.prod/vantage/data/processed/gen/$gen_version/ -maxdepth 5 -mindepth 5 | awk -F '/' '{print $14}' > gt-$gen_version.list)
bash
exec ls -la
echo "ls" | xargs -i sh -c "{}"
ctrl-Z
disown -a && exit
for graphical applications DISPLAY must be specified
- using built-in editor
at now + 5 minutes
at> DISPLAY=:0 rifle /path/to/image
^D
- using inline execution
echo "DISPLAY=:0 rifle /path/to/image/task.png" | at now + 1 min
echo "DISPLAY=:0 rifle /path/to/image/task.png" | at 11:01
strace -e open,access <command to run application>
ps fC firefox
pgrep firefox
pid of process by name
pidof <app name>
pidof chrome
process list, process tree
ps axjf
ps -ef --forest
ps -fauxw
process full command, ps full, ps truncate
ps -ewwo pid,cmd
windows analogue of 'ps aux'
wmic path win32_process get Caption, Processid, Commandline
output to log stop process
rm -rf -- !(exclude-filename.sh)
you have to escape all '%' signs with '\%' in crontab entries; the crontab file is located here:
sudo less /var/spool/cron/crontabs/$USER
check that cron is active
sudo service cron status
# edit file
crontab -e
# list of all jobs
crontab -l
adding file with cron job
echo " * * * * echo `date` >> /out.txt" >> print-date.cron
chmod +x print-date.cron
crontab print-date.cron
logs
sudo tail -f /var/log/syslog
is cron running
ps -ef | grep cron | grep -v grep
start/stop/restart cron
systemctl start cron
systemctl stop cron
systemctl restart cron
# skip first line in output
docker ps -a | awk '{print $1}' | tail -n +2
./hbase.sh 2>/dev/null
sudo python3 echo.py > out.txt 2>&1 &
sudo python3 echo.py &> out.txt &
sudo python3 echo.py > out.txt &
grep -nr "text for search" .
# you must specify * or a file mask for the files in the folder !!!
grep -s "search_string" /path/to/folder/*
sed -n 's/^search_string//p' /path/to/folder/*
# grep in current folder
grep -s "search-string" * .*
grep -B 4
grep --before-context=4
grep -A 4
grep --after-context=4
oc describe pod/gateway-486-bawfps | awk '/Environment:/,/Mounts:/'
grep -rn '.' -e '@Table'
grep -ilR "@Table" .
cat file.txt | grep -e "occurence1" -e "occurence2"
cat file.txt | grep -e "occurence1\|occurence2"
cat file.txt | grep -e "occurence1" | grep -e "occurence2"
cat file.txt | grep -v "not-include-string"
cat file.txt | grep -v -e "not-include-string" -e "not-include-another"
grep -ir "memory" --include="*.scala"
grep -ir --include=README.md ".*base" 2>/dev/null
echo "BN_FASDLT/1/20200624T083332_20200624T083350_715488_BM60404_BN_FASDLT.MF4" | awk -F "/" '{print $NF}' | grep "[0-9]\{8\}"
grep -rH -A 2 "@angular/core"
grep -ir --include=README.md "base" 2>/dev/null
grep -lir 'password'
grep -F -x -f path-to-file1 path-to-file2
grep --fixed-strings --line-regexp -f path-to-file1 path-to-file2
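small illustration of the exact-line match above ( file names are just examples ):
printf "alpha\nbeta\n" > /tmp/list1
printf "alpha\ngamma\n" > /tmp/list2
grep -F -x -f /tmp/list1 /tmp/list2
# prints: alpha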
diff -w file1.txt file2.txt
diff -c file1.txt file2.txt
apt install dateutils
dateutils.ddiff -i '%Y%m%d%H%M%S' -f '%y %m %d %H %M %S' 20160312000101 20170817040001
/etc/systemd/timesyncd.conf.d/90-time-sync.conf
[Time]
NTP=ntp.ubuntu.com
FallbackNTP=ntp.ubuntu.com
restart time sync service
timedatectl set-ntp true && systemctl restart systemd-timesyncd.service
array=( $(echo $result | tr '{},' ' ') )
echo "hello World" | tr '[:lower:]' '[:upper:]'
echo "hello World 1234 woww" | tr -dc 'a-zA-Z'
sed --in-place 's/LinkedIn/Yahoo/g' *
# replace tab symbol with comma symbol
sed --in-place 's/\t/,/g' one_file.txt
# add a new line to a property file without opening an editor
sed --in-place 's/\[General\]/\[General\]\nenable_trusted_host_check=0/g' matomo-php.ini
date +%H:%M:%S:%s
date +%Y-%m-%d-%H:%M:%S:%s
# output file with current time
python3 /imap-message-reader.py > message_reader`date +%H:%M:%S`.txt
cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 8 | tr '[:upper:]' '[:lower:]' | head -n 1
zgrep "message_gateway_integration" /var/lib/brand-server/cache/zip/*.zip
ls -1 *.zip | xargs -I{} unzip -p {} brand.xml | grep instant-limit | grep "\\."
unzip file.zip -d output_folder
unzip -o file.zip -d output_folder
wget -qO- https://nodejs.org/dist/v10.16.3/node-v10.16.3-linux-x64.tar.xz | tar xJf - -C /target/directory
tar zxvf backup.tgz
echo "hello from someone" | tee --append out.txt
echo "hello from someone" | tee --append out.txt > /dev/null
vi wrap( :set wrap, :set nowrap )
shortcut | description |
---|---|
/ | search forward |
? | search backward |
n | next occurence |
N | prev occurence |
export PROMPT_COMMAND="echo -n \[\$(date +%H:%M:%S)\]\ "
.bashrc of ubuntu
if [ "$color_prompt" = yes ]; then
# PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@$(date +%d%m_%H%M)\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
# PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
PS1='${debian_chroot:+($debian_chroot)}\u:\$(date +%d.%m_%H:%M)\w\$ '
fi
unset color_prompt force_color_prompt
# red
export PS1=`printf "\033[31m$ staging \033[39m"`
# green
export PS1=`printf "\033[32m$ staging \033[39m"`
Color | Foreground | Background |
---|---|---|
Black | \033[30m | \033[40m |
Red | \033[31m | \033[41m |
Green | \033[32m | \033[42m |
Orange | \033[33m | \033[43m |
Blue | \033[34m | \033[44m |
Magenta | \033[35m | \033[45m |
Cyan | \033[36m | \033[46m |
Light gray | \033[37m | \033[47m |
Fallback to distro's default | \033[39m | \033[49m |
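usage example with the codes above:
echo -e "\033[32mOK\033[39m and \033[31mFAILED\033[39m"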
echo $?
cat /proc/meminfo
cat /proc/sys/fs/file-max
mimetype -d {filename}
xdg-open {filename}
w3m {filename}
sensible-browser http://localhost:3000/api/status
x-www-browser http://localhost:3000/api/status
# for MacOS
open http://localhost:3000/api/status
wget --method=POST http://{host}:9000/published/resources/10050001.zip
wget -O- http://{host}:8500/wd-only/getBrandXml.jsp?brand=229099017 > /dev/null 2>&1
wget -nv -O- http://{host}:8500/wd-only/getBrandXml.jsp?brand=229099017 2>/dev/null
wget -O out.zip http://{host}:9000/published/resources/10050001.zip
# in case of complex output path
curl -s http://{host}:9000/published/resources/10050001.zip --create-dirs -o /home/path/to/folder/file.zip
wget http://host:9090/wd-only/1005000.zip --directory-prefix="/home/temp/out"
wget --no-check-certificate https://musan999999.mueq.adas.intel.com:8888/data-api/session/
wget --tries=1 --timeout=5 --no-check-certificate https://musan999999.mueq.adas.intel.com:8888/data-api/session/
wget -e use_proxy=yes -e http_proxy=127.0.0.1:7777 https://mail.ubsgroup.net/
or just with settings file "~/.wgetrc"
use_proxy = on
http_proxy = http://username:[email protected]:port/
https_proxy = http://username:[email protected]:port/
ftp_proxy = http://username:[email protected]:port/
zip -r bcm-1003.zip *
zip --junk-paths bcm-1003.zip *
alias sublime_editor=/Applications/SublimeEditor/Sublime
subl(){
sublime_editor "$1" &
}
alias sublime_editor
type subl
sed cheat sheet, replace
replace "name" with "nomen" string
sed 's/name/nomen/g'
# replace only second occurence
# echo "there is a test is not a sentence" | sed 's/is/are/2'
example of replacing all occurences in multiply files
for each_file in `find -name "*.java"`; do
sed --in-place 's/vodkafone/cherkavi/g' $each_file
done
locate -ir "/zip$" | sed -n '2p'
/usr/bin/bash^M: bad interpreter: No such file or directory
solution
sed -i -e 's/\r$//' Archi-Ubuntu.sh
ps -aux | awk 'BEGIN{a=0}{a=a+1}END{print a}'
true
false
find -cmin -2
curl -u username:password http://example.com
curl --head http://example.com
curl -L http://example.com
curl -X PUT --header "Content-Type: application/vnd.wirecard.brand.apis-v1+json;charset=ISO-8859-1" -H "x-username: cherkavi" [email protected] http://q-brands-app01.wirecard.sys:9000/draft/brands/229099017/model/country-configurations
curl -X POST http://localhost:8983/solr/collection1/update?commit=true -H "Content-Type: application/json" --data '{"add":"data"}'
curl -X POST http://localhost:8983/solr/collection1/update?commit=true -H "Content-Type: application/json" --data-raw '{"add":"data"}'
curl -X POST http://localhost:8983/solr/collection1/update?commit=true -H "Content-Type: application/json" --data-binary '{"add":"data"}'
# or with bash variable
SOME_DATA="my_personal_value"
curl -X POST http://localhost:8983/solr/collection1/update?commit=true -H "Content-Type: application/json" --data-binary '{"add":"'$SOME_DATA'"}'
# or with data from file
curl -X POST http://localhost:8983/test -H "Content-Type: application/json" --data-binary '@/path/to/file.json'
# or with multipart body
curl -i -X POST -H "Content-Type: multipart/form-data" -F "[email protected]" -F "userid=1234" http://mysuperserver/media/upload/
# POST request GET style
curl -X POST "http://localhost:8888/api/v1/notification/subscribe?email=one%40mail.ru&country=2&state=517&city=qWkbs&articles=true&questions=true&listings=true" -H "accept: application/json"
# https://kb.objectrocket.com/elasticsearch/elasticsearch-cheatsheet-of-the-most-important-curl-requests-252
curl -X GET "https://elasticsearch-label-search-prod.vantage.org/autolabel/_search?size=100&q=moto:*&pretty"
echo "'" 'sentence' "'"
- curl -s -X GET http://google.com
- curl --silent -X GET http://google.com
- curl http://google.com 2>/dev/null
curl --insecure -s -X GET http://google.com
curl --verbose --insecure -s -X GET http://google.com
chrome extension cookies.txt
# send predefined cookie to url
curl -b path-to-cookie-file.txt -X GET url.com
# send cookie from command line
curl --cookie "first_cookie=123;second_cookie=456;third_cookie=789" -X GET url.com
# send cookie from command line
curl 'http://localhost:8000/members/json-api/auth/user' -H 'Cookie: PHPSESSID=5c5dddcd96b9f2f41c2d2f87e799feac'
# collect cookie from remote url and save in file
curl -c cookie-from-url-com.txt -X GET url.com
curl "http://some.resource/read_book.php?id=66258&p=1" | iconv --from-code WINDOWS-1251 --to-code UTF-8
airflow_trigger(){
SESSION_ID=$1
ENDPOINT=$2
BODY='{"conf":{"session_id":"'$SESSION_ID'","branch":"merge_labels"}}'
curl --silent -w "response-code: %{http_code}\n" --data-binary "$BODY" -u $AIRFLOW_USER:$AIRFLOW_PASSWORD -X POST $ENDPOINT
return $?
}
DAG_NAME='labeling'
airflow_trigger $each_session "https://airflow.vantage.org/api/experimental/dags/$DAG_NAME/dag_runs"
curl --show-error "http://some.resource/read_book.php?id=66258&p=1"
curl --max-time 10 -so /dev/null -w '%{time_total}\n' google.com
curl "https://{foo,bar}.com/file_[1-4].webp" --output "#1_#2.webp"
xmllint --format /path/to/file.xml > /path/to/file-formatted.xml
xmllint --noout file.xml; echo $?
cat output.json | jq .
echo '[{"id": 1, "name": "Arthur", "age": "21"},{"id": 2, "name": "Richard", "age": "32"}]' | \
jq ".[] | .name"
echo '[{"id": 1, "name": "Arthur", "age": "21"},{"id": 2, "name": "Richard", "age": "32"}]' | \
jq '.[] | if .name == "Richard" then . else empty end | [.id, .name] | @csv'
# convert from yaml to json, retrieve values from json, convert to csv
cat temp-pod.yaml | yq r -j --prettyPrint - | jq '[.metadata.namespace, .metadata.name, .spec.template.spec.nodeSelector."kubernetes.io/hostname"] | @csv'
echo '{"smart_collections":[{"id":270378401973},{"id":270378369205}]}' | jq '. "smart_collections" | .[] | .id'
# read value
cat k8s-pod.yaml | yq r - --printMode pv "metadata.name"
# convert to JSON
cat k8s-pod.yaml | yq r -j --prettyPrint -
chmod -R +x <folder name>
# remove world access
chmod -R o-rwx /opt/sm-metrics/grafana-db/data
# remove group access
chmod -R g-rwx /opt/sm-metrics/grafana-db/data
# add rw access for current user
chmod u+rw screenshot_overlayed.png
find . -name "*.sql" -print0 | xargs -0 chmod 666
mkdir -p some-folder/{1..10}/{one,two,three}
ONE="this is a test"; echo $ONE
activate environment variables from file, env file, export env, export all env, all variable from file, all var export, env var file
FILE_WITH_VAR=.env.local
source $FILE_WITH_VAR
export $(cut -d= -f1 $FILE_WITH_VAR)
# if you have comments in file
source $FILE_WITH_VAR
export `cat $FILE_WITH_VAR | awk -F= '{if($1 !~ "#"){print $1}}'`
/var/log/syslog
sudo http_proxy='http://user:@proxy.muc:8080' apt install meld
remember to escape bash special characters ( $, @, ... )
- .bashrc
- /etc/environment
- /etc/systemd/system/docker.service.d/http-proxy.conf
- /etc/apt/apt.conf.d/proxy.conf
Acquire::http::Proxy "http://username:password@proxyhost:port";
Acquire::https::Proxy "http://username:password@proxyhost:port";
- snap
sudo snap set system proxy.http="http://user:[email protected]:8080"
sudo snap set system proxy.https="http://user:[email protected]:8080"
# leads to error: cannot connect to the server
snap install <app>
# Unmask the snapd.service:
sudo systemctl unmask snapd.service
# Enable it:
systemctl enable snapd.service
# Start it:
systemctl start snapd.service
sudo apt list -a [name of the package]
sudo apt list -a kubeadm
dpkg --add-architecture i386
dpkg --print-architecture
dpkg --print-foreign-architectures
sudo apt-get install libglib2.0-0:i386 libgtk2.0-0:i386
apt list <name of package>
apt mark hold kubeadm
# install: this package is marked for installation.
# deinstall (remove): this package is marked for removal.
# purge: this package, and all its configuration files, are marked for removal.
# hold: this package cannot be installed, upgraded, removed, or purged.
# unhold:
# auto: auto installed
# manual: manually installed
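apt-mark usage examples:
# show packages currently on hold
apt-mark showhold
# release the hold
sudo apt-mark unhold kubeadm
# mark a package as manually installed
sudo apt-mark manual kubeadm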
sudo apt-get install --only-upgrade {packagename}
sudo apt list
sudo dpkg -l
First letter | desired package state ("selection state") |
---|---|
u | unknown |
i | install |
r | remove/deinstall |
p | purge (remove including config files) |
h | hold |
Second letter | current package state |
---|---|
n | not-installed |
i | installed |
c | config-files (only the config files are installed) |
U | unpacked |
F | half-configured (configuration failed for some reason) |
h | half-installed (installation failed for some reason) |
W | triggers-awaited (package is waiting for a trigger from another package) |
t | triggers-pending (package has been triggered) |
Third letter | error state (you normally shouldn't see a third letter, but a space, instead) |
---|---|
R | reinst-required (package broken, reinstallation required) |
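example of filtering by those state letters: packages removed but with config files left ( state 'rc' ) are candidates for purging
dpkg -l | awk '/^rc/ {print $2}'
# review the list first, then purge them
dpkg -l | awk '/^rc/ {print $2}' | xargs sudo apt-get purge -y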
sudo apt-cache madison {package name}
sudo apt-get install {package name}={version}
sudo apt-get clean
sudo apt-get autoremove --purge
sudo apt-get --purge remove {app name}
- sudo invoke-rc.d localkube stop
- sudo invoke-rc.d localkube status ( sudo service localkube status )
- sudo update-rc.d -f localkube remove
- sudo grep -ir /etc -e "kube"
- rm -rf /etc/kubernetes
- rm -rf /etc/systemd/system/localkube.service
- vi /var/log/syslog
echo $?
sudo vmware-installer -u vmware-player
pdftk original.pdf stamp watermark.pdf output output.pdf
- lsb_release -a
- cat /etc/system-release
- uname -a
ip -4 a
ip -6 a
interfaces
sudo ifdown lo && sudo ifup lo
sudo service network-manager restart
# status of all connections
nmcli d
nmcli connection
nmcli connection up id {name from previous command}
nmcli connection down id {name of connection}
wifi_code='188790542'
point="FRITZ!Box 7400 YO"
nmcli device wifi connect "$point" password $wifi_code
# sudo cat /etc/NetworkManager/system-connections/*
sudo openconnect --no-proxy {ip-address} --user={user name}
sudo openconnect --no-cert-check --no-proxy {ip-address} --user={user name} --servercert
example of sniffing redis traffic ( packet sniffer )
sudo ngrep -W byline -d docker0 -t '' 'port 6379'
# 1------------ 2-------------------- 3--------------
sudo tcpdump -nvX -v src port 6443 and src host 10.140.26.10 and dst port not 22
# and, or, not
## list of certificates
keytool -list -keystore ./src/main/resources/com/ubs/crm/data/api/rest/server/keystore_server
## generating ssl key stores
keytool -genkeypair -keystore ./src/main/resources/com/ubs/crm/data/api/rest/server/keystore_server -alias serverKey -dname "CN=localhost, OU=AD, O=UBS AG, L=Zurich, ST=Bavaria, C=DE" -keyalg RSA
# enter password...
## Importing ( updating, adding ) trusted SSL certificates
keytool -import -file ~/Downloads/certificate.crt -keystore ./src/main/resources/com/ubs/crm/data/api/rest/server/keystore_server -alias my-magic-number
# current dns
sudo cat /etc/resolv.conf
# resolving hostname
dig {hostname}
#
systemd-resolve --status
gpg --symmetric {filename}
gpg --decrypt {filename}
# encrypt
# openssl [encryption type] -in [original] -out [output file]
openssl des3 -in original.txt -out original.txt.encrypted
# decrypt
# openssl [encryption type] -d -in [encrypted file] -out [original file]
openssl des3 -d -in original.txt.encrypted -out original.txt
# list of encryptors (des3):
openssl enc -list
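a more modern variant with AES-256 and key stretching ( needs openssl 1.1.1+ ), a sketch:
# encrypt
openssl enc -aes-256-cbc -pbkdf2 -in original.txt -out original.txt.encrypted
# decrypt
openssl enc -aes-256-cbc -pbkdf2 -d -in original.txt.encrypted -out original.txt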
- adduser {username} {destination group name}
- edit file /etc/group
add :{username} to the end of line with {groupname}:x:999
sudo useradd test
sudo useradd --create-home test --groups sudo
# set password for new user
sudo passwd test
# set default bash shell
chsh --shell /bin/bash test
sudo adduser vitalii sudo
# close all opened sessions
# after your work done
sudo deluser vitalii sudo
sudo -E bash -c 'python3'
sudo userdel -r test
sudo groupadd new_group
usermod --append --groups new_group my_user
id my_user
chgrp new_group /path/to/folder
sudo -E <command>
. ./airflow-get-log.sh
source ./airflow-get-log.sh
cat dag-runs-failed.id | . ./airflow-get-log.sh
users
w
who --all
cat /etc/passwd | cut --delimiter=: --fields=1
when you see message:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
use this:
ssh-keygen -R <host>
or
rm ~/.ssh/known_hosts
- proxy local, proxy for user /etc/profile.d/proxy.sh
export HTTP_PROXY=http://webproxy.host:3128
export http_proxy=http://webproxy.host:3128
export HTTPS_PROXY=http://webproxy.host:3128
export https_proxy=http://webproxy.host:3128
export NO_PROXY="localhost,127.0.0.1,.host,.viola.local"
export no_proxy="localhost,127.0.0.1,.host,.viola.local"
Acquire::http::proxy "http://proxy.company.com:80/";
Acquire::https::proxy "https://proxy.company.com:80/";
Acquire::ftp::proxy "ftp://proxy.company.com:80/";
Acquire::socks5::proxy "socks://127.0.0.1:1080/";
http_proxy=http://webproxy.host:3128
no_proxy="localhost,127.0.0.1,.host.de,.viola.local"
create environment for http
sudo gedit /etc/systemd/system/{service name}.service.d/http-proxy.conf
[Service]
Environment="http_proxy=http://user:[email protected]:8080"
create environment for https
sudo gedit /etc/systemd/system/{service name}.service.d/https-proxy.conf
[Service]
Environment="https_proxy=http://user:[email protected]:8080"
restart service, service restart
$ sudo systemctl daemon-reload
$ sudo systemctl restart {service name}
enable automatic start, disable autostart
sudo systemctl enable {service name}
sudo systemctl disable {service name}
service check logs
systemctl status {service name}
journalctl -u {service name} -e
# print all units
journalctl -F _SYSTEMD_UNIT
# system log
journalctl -f -l
# system log for app log
$ journalctl -f -l -u python -u mariadb
# system log since 300 second
$ journalctl -f -l -u httpd -u mariadb --since -300
check settings
systemctl show {service name} | grep proxy
# export SYSTEM_EDITOR="vim"
# export SYSTEMD_EDITOR="vim"
sudo systemctl edit snapd.service
# will edit: /etc/systemd/system/snapd.service.d/override.conf
add the following lines
[Service]
Environment=http_proxy=http://proxy:port
Environment=https_proxy=http://proxy:port
restart service
sudo systemctl daemon-reload
sudo systemctl restart snapd.service
sudo snap set system proxy.http="http://user:[email protected]:8080"
sudo snap set system proxy.https="http://user:[email protected]:8080"
export proxy_http="http://user:[email protected]:8080"
export proxy_https="http://user:[email protected]:8080"
sudo snap search visual
# apache server installation, apache server run, web server run, webserver start
sudo su
yum update -y
yum install -y httpd
service httpd start
chkconfig httpd
chkconfig httpd on
vim /var/www/html/index.html
debian
# installation
sudo su
apt update -y
apt install -y apache2
# service
sudo systemctl status apache2.service
sudo systemctl start apache2.service
# change index html
vim /var/www/html/index.html
# Uncomplicated FireWall
ufw app list
ufw allow 'Apache'
ufw status
# enable module
a2enmod rewrite
# disable module
# http://manpages.ubuntu.com/manpages/trusty/man8/a2enmod.8.html
a2dismod rewrite
# enable or disable site/virtual host
# http://manpages.ubuntu.com/manpages/trusty/man8/a2ensite.8.html
a2dissite *.conf
a2ensite my_public_special.conf
sudo a2enmod ssl
# sudo a2dismod ssl
# tutorial
vim /usr/share/doc/apache2/README.Debian.gz
# creating self-signed certificates
sudo make-ssl-cert generate-default-snakeoil --force-overwrite
# check certificates
sudo ls -la /etc/ssl/certs/ssl-cert-snakeoil.pem
sudo ls -la /etc/ssl/private/ssl-cert-snakeoil.key
vim /etc/apache2/sites-available/default-ssl.conf
SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
sudo service apache2 restart
Another tutorial https://www.digicert.com/easy-csr/openssl.htm https://www.digicert.com/kb/csr-ssl-installation/ubuntu-server-with-apache2-openssl.htm
Generating an RSA private key
openssl req -new -newkey rsa:2048 \
  -nodes -out cherkavideveloper.csr \
  -keyout cherkavideveloper.key \
  -subj "/C=DE/ST=Bavaria/L=München/O=cherkavi/CN=cherkavi developer"
SSLCertificateFile "/path/to/www.example.com.cert"
SSLCertificateKeyFile "/path/to/www.example.com.key"
- ETL
- web management - atomicproject.io, cockpit
- install
- guide
- after installation
- use your own user/password
xmodmap -pke
xmodmap -e "keycode 107 = Super_L"
to reset
setxkbmap
create file '~/.Xmodmap'
xev | grep keysym
sudo evtest
sudo evtest /dev/input/event21
content of $HOME/.config/xmodmap-hjkl
keycode 66 = Mode_switch
keysym h = h H Left
keysym l = l L Right
keysym k = k K Up
keysym j = j J Down
execute re-mapping, permanent solution
# vim /etc/profile
xmodmap $HOME/.config/xmodmap-hjkl
setxkbmap -option
set-title(){
ORIG=$PS1
TITLE="\e]2;$@\a"
PS1=${ORIG}${TITLE}
}
set-title "my title for terminal"
base64 cAdvisor-start.sh | base64 --decode
echo "just a text string" | base64 | base64 --decode
md5sum
sudo dmidecode --string system-serial-number
sudo dmidecode --string processor-family
sudo dmidecode --string system-manufacturer
# disk serial number
sudo lshw -class disk
inxi -C
inxi --memory
inxi -CfxCa
convert -geometry 400x600 -density 100x100 -quality 100 test-pdf.pdf test-pdf.jpg
barcode -o 1112.pdf -e "code39" -b "1112" -u "mm" -g 50x50
http://goqr.me/api/doc/create-qr-code/
http://api.qrserver.com/v1/create-qr-code/?data=HelloWorld!&size=100x100
apt install zbar-tools
zbarimg <file>
gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=finished.pdf test-pdf2.pdf test-pdf3.pdf test-pdf4.pdf
libreoffice --headless --convert-to pdf "/home/path/Dativ.doc" --outdir /tmp/output
bzip2 -dc ricochet-1.1.4-src.tar.bz2 | tar xvf -
alias clipboard="xclip -selection clipboard"
alias clipboard-ingest="xclip -selection clipboard"
function clipboard-copy-file(){
xclip -in -selection c $1
}
alias clipboard-print="xclip -out -selection clipboard"
screenshot(){
file_name="/home/user/Pictures/screenshots/screenshot_"`date +%Y%m%d_%H%M%S`".png"
scrot $file_name -s -e "xdg-open $file_name"
}
http://cups.org - printer installation; admin page: http://localhost:631/admin ; in case of an authorization issue:
edit /etc/cups/cupsd.conf , change AuthType to None and comment out the 'Require user @SYSTEM' line:
<Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default CUPS-Get-Devices>
AuthType None
# AuthType Default
# Require user @SYSTEM
Order deny,allow
</Limit>
and restart the service
sudo service cups restart
dmesg --level=err,warn
dmesg --follow
# save all messages /var/log/dmesg
dmesg -S
df -ha
# with visualization
ncdu
# list of all hard drives, disk list
sudo lshw -class disk -short
# write image
sudo dd bs=4M if=/home/my-user/Downloads/archlinux-2019.07.01-x86_64.iso of=/dev/sdb status=progress && sync
sudo add-apt-repository universe
sudo add-apt-repository ppa:mkusb/ppa
sudo apt-get update
sudo apt install --install-recommends mkusb mkusb-nox usb-pack-efi
mkusb
# Install, persistent live, UEFI
# detect disks
sudo lshw -class disk -short
sudo fdisk -l
# format drive
DEST_DRIVE=/dev/sdb
sudo dd if=/dev/zero of=$DEST_DRIVE bs=512 count=1
# sudo mke2fs -t xfs $DEST_DRIVE
# split drive, split disk, split usb
sudo parted $DEST_DRIVE
print
rm 1
rm 2
mklabel kali
msdos
mkpart primary ext4 0.0 5GB
I
mkpart extended ntfs 5GB -1s
print
set 1 boot on
set 2 lba on
quit
sudo fdisk -l
STARTTIME=$SECONDS
sleep 2
echo $(( SECONDS - STARTTIME ))
STARTTIME=`date +%s.%N`
sleep 2.5
ENDTIME=`date +%s.%N`
TIMEDIFF=`echo "$ENDTIME - $STARTTIME" | bc | awk -F"." '{print $1"."substr($2,1,3)}'`
sudo apt-get install translate-shell
trans -source de -target ru -brief "German sentence"
ffmpeg -i video.mp4 -i audio.mp4 output.mp4
sox 1.wav 2.wav 3.wav 4.wav output.wav
ffmpeg -i 1.wav -i 2.wav -i 3.wav output.wav
sudo awk -F: '($3>=LIMIT) && ($3!=65534)' /etc/passwd > passwd-export
sudo awk -F: '($3>=LIMIT) && ($3!=65534)' /etc/group > /opt/group-export
sudo awk -F: '($3>=LIMIT) && ($3!=65534) {print $1}' /etc/passwd | tee - | egrep -f - /etc/shadow > /opt/shadow-export
sudo cp /etc/gshadow /opt/gshadow-export
# sudo apt-get install sqlite3
cat src/scripts.sql | sqlite3 src/db.sqlite
find . -name "*.java" -ls | awk '{byte_size += $7} END{print byte_size}'
du -hs * | sort -h
expr 30 / 5
myvar=$(expr 1 + 1)
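the same with bash built-in arithmetic, no external command needed:
echo $(( 30 / 5 ))
myvar=$(( 1 + 1 ))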
echo 'password' | sudo -S bash -c "echo 2 > /sys/module/hid_apple/parameters/fnmode" 2>/dev/null
xdg-mime query default x-scheme-handler/http
open in default browser
x-www-browser http://localhost:9090
rifle <path to file>
sudo apt-get install haskell-stack
stack upgrade
stack install toodles
sudo apt install byobu
dpkg --add-architecture i386
dpkg --print-architecture
dpkg --print-foreign-architectures
apt-get install xdotool
# move the mouse x y
xdotool mousemove 1800 500
# left click
xdotool click 1
gcal --with-week-number