List LSOF and sort
· 185 B · Text
Ham
# List open-but-deleted files (+L1 = link count < 1), largest first.
# lsof +L1 columns: COMMAND PID USER FD TYPE DEVICE SIZE/OFF NLINK NODE NAME
# Fix: print the header OUTSIDE the sort — in the original, the header row was
# numerically sorted with the data (value 0) and ended up at the bottom.
{ echo "COMMAND FD SIZE_MB NAME"; sudo lsof +L1 | awk 'NR>1{print $1, $4, $7, $10}' | numfmt --to-unit=1M --field=3 --format="%.2f MB" | sort -k3 -nr; } | column -t
| 1 | sudo lsof +L1 | awk 'NR==1{print "COMMAND", "FD", "SIZE_MB", "NAME"} NR>1{print $1, $4, $7, $10}' | numfmt --to-unit=1M --field=3 --format="%.2f MB" --header | sort -k3 -nr | column -t |
docker-deleted-log-report.sh
· 1.3 KiB · Bash
Ham
#!/bin/bash
# Report deleted-but-still-open Docker json-file logs (disk space held hostage
# by processes keeping dead log inodes open), annotated with container
# metadata and sorted by size ascending.
set -uo pipefail

TMPFILE=$(mktemp)
# Clean up the temp file on every exit path, not just the happy one.
trap 'rm -f -- "$TMPFILE"' EXIT

# lsof +L1 columns: COMMAND PID USER FD TYPE DEVICE SIZE/OFF NLINK NODE NAME
sudo lsof +L1 | awk '
NR == 1 { next }
$NF ~ /\(deleted\)$/ && $0 ~ /\/var\/lib\/docker\/containers\/.*-json\.log/ { print }
' | while read -r line; do
  PROCESS=$(awk '{print $1}' <<<"$line")
  SIZE=$(awk '{print $(NF-4)}' <<<"$line")   # SIZE/OFF column under +L1
  # Extract the log path by pattern instead of a fixed field offset.
  # (The original started at field 9, which is NODE under +L1, so the inode
  # number was prepended to the path and it only worked by accident.)
  FILE=$(grep -o '/var/lib/docker/containers/[^ ]*-json\.log' <<<"$line")

  # Skip rows whose SIZE column is not a plain byte count, or with no path.
  [[ "$SIZE" =~ ^[0-9]+$ ]] || continue
  [[ -n "$FILE" ]] || continue

  SIZE_MB=$(awk -v s="$SIZE" 'BEGIN {printf "%.2f", s/1048576}')
  # Log lives at /var/lib/docker/containers/<id>/<id>-json.log
  CONTAINER_ID=$(basename "$(dirname "$FILE")")

  # Container metadata; N/A when the container is already gone.
  INFO=$(docker ps -a --no-trunc --filter "id=$CONTAINER_ID" --format "{{.ID}}|{{.Image}}|{{.Command}}|{{.RunningFor}}|{{.Names}}")
  if [[ -z "$INFO" ]]; then
    ID_SHORT="N/A"; CREATED="N/A"; NAMES="N/A"
  else
    IFS='|' read -r ID IMAGE_RAW CMD CREATED NAMES <<< "$INFO"
    ID_SHORT=${ID:0:12}
  fi

  printf '%s MB\t%s\t%s\t%s\t%s\n' "$SIZE_MB" "$PROCESS" "$ID_SHORT" "$CREATED" "$NAMES" >> "$TMPFILE"
done

# Output: smallest first; field 1 is the numeric MB value.
sort -nk1 "$TMPFILE"
| 1 | #!/bin/bash |
| 2 | |
| 3 | TMPFILE=$(mktemp) |
| 4 | |
| 5 | # Get all deleted Docker logs that are still open |
| 6 | sudo lsof +L1 | awk ' |
| 7 | NR == 1 { next } |
| 8 | $NF ~ /\(deleted\)$/ && $0 ~ /\/var\/lib\/docker\/containers\/.*-json\.log/ { print } |
| 9 | ' | while read -r line; do |
| 10 | PROCESS=$(echo "$line" | awk '{print $1}') |
| 11 | SIZE=$(echo "$line" | awk '{print $(NF-4)}') |
| 12 | FILE=$(echo "$line" | awk '{for (i=9; i<=NF; i++) printf $i " "; print ""}' | sed 's/ (deleted)//') |
| 13 | |
| 14 | # Skip if SIZE is not numeric |
| 15 | if ! [[ "$SIZE" =~ ^[0-9]+$ ]]; then |
| 16 | continue |
| 17 | fi |
| 18 | |
| 19 | SIZE_MB=$(awk -v s="$SIZE" 'BEGIN {printf "%.2f", s/1048576}') |
| 20 | CONTAINER_ID=$(basename "$(dirname "$FILE")") |
| 21 | |
| 22 | # Get container metadata |
| 23 | INFO=$(docker ps -a --no-trunc --filter "id=$CONTAINER_ID" --format "{{.ID}}|{{.Image}}|{{.Command}}|{{.RunningFor}}|{{.Names}}") |
| 24 | if [[ -z "$INFO" ]]; then |
| 25 | ID_SHORT="N/A"; IMAGE="N/A"; CMD="N/A"; CREATED="N/A"; NAMES="N/A" |
| 26 | else |
| 27 | IFS='|' read -r ID IMAGE_RAW CMD CREATED NAMES <<< "$INFO" |
| 28 | ID_SHORT=$(echo "$ID" | cut -c1-12) |
| 29 | IMAGE=$(echo "$IMAGE_RAW" | sed 's|.*/||' | cut -d':' -f1) |
| 30 | fi |
| 31 | |
| 32 | echo -e "$SIZE_MB MB\t$PROCESS\t$ID_SHORT\t$CREATED\t$NAMES" >> "$TMPFILE" |
| 33 | done |
| 34 | |
| 35 | # Output |
| 36 | #echo -e "Proc\tImage\tSize\tContainer ID\tImage\tCommand\tCreated\tNames" |
| 37 | sort -nk1 "$TMPFILE" |
| 38 | |
| 39 | rm -f "$TMPFILE" |
k8s-trim-logs.sh
· 3.2 KiB · Bash
Ham
#!/usr/bin/env bash
# Trim Kubernetes/Docker container logs in place, keeping last N lines.
# Default: keep last 1000 lines.
set -euo pipefail
# KEEP_LINES may be pre-set in the environment; the --lines flag overrides it.
KEEP_LINES="${KEEP_LINES:-1000}"
# 1 = report only, make no changes (set by --dry-run).
DRY_RUN=0
# Common log locations
# NOTE: these defaults are replaced wholesale when any --path flag is given.
PATHS=(
"/var/lib/docker/containers" # Docker json-file logs
"/var/log/containers" # K8s symlinks
"/var/log/pods" # K8s pod logs
)
# Print CLI help text to stdout (expands $0 and current KEEP_LINES default).
usage() {
  printf '%s\n' \
    "Usage: sudo bash $0 [--lines N] [--dry-run] [--path DIR ...]" \
    "--lines N Keep last N lines (default: ${KEEP_LINES})" \
    "--dry-run Show what would change, do not modify files" \
    "--path DIR Add/override paths to scan (can be repeated)" \
    "Env: KEEP_LINES can also be set."
}
# ---- Argument parsing ----
# --path accumulates; if any --path is given, the defaults are replaced.
CUSTOM_PATHS=()
while [[ $# -gt 0 ]]; do
  case "$1" in
    --lines)
      # Guard the value: a bare `shift 2` past the end of $@ returns non-zero
      # and silently kills the script under `set -e`.
      [[ $# -ge 2 ]] || { echo "--lines requires a value" >&2; usage; exit 1; }
      KEEP_LINES="$2"; shift 2;;
    --dry-run)
      DRY_RUN=1; shift;;
    --path)
      [[ $# -ge 2 ]] || { echo "--path requires a value" >&2; usage; exit 1; }
      CUSTOM_PATHS+=("$2"); shift 2;;
    -h|--help)
      usage; exit 0;;
    *)
      echo "Unknown arg: $1"; usage; exit 1;;
  esac
done

# Validate early: KEEP_LINES feeds (( )) arithmetic and `tail -n` later,
# where a non-numeric value would abort mid-run with a cryptic error.
if ! [[ "$KEEP_LINES" =~ ^[0-9]+$ ]]; then
  echo "KEEP_LINES must be a non-negative integer (got: $KEEP_LINES)" >&2
  exit 1
fi

if [[ ${#CUSTOM_PATHS[@]} -gt 0 ]]; then
  PATHS=("${CUSTOM_PATHS[@]}")
fi

# Truncating logs requires write access to root-owned files.
if [[ $EUID -ne 0 ]]; then
  echo "Please run as root (sudo)."; exit 1
fi

echo "Scanning paths: ${PATHS[*]}"
echo "Keeping last ${KEEP_LINES} lines per log."
[[ $DRY_RUN -eq 1 ]] && echo "DRY RUN mode—no changes will be made."
# ---- Collect candidate files ----
mapfile -t FILES < <(
  for d in "${PATHS[@]}"; do
    [[ -d "$d" ]] || continue
    # Typical Docker/K8s log patterns
    find "$d" -type f \( -name "*-json.log" -o -name "*.log" \) 2>/dev/null
  done | sort -u
)

total_before=0
total_after=0
trimmed_count=0

for f in "${FILES[@]}"; do
  [[ -f "$f" ]] || continue

  # Skip files already within the keep budget (wc -l is cheap enough here).
  lines=$(wc -l < "$f" || echo 0)
  if (( lines <= KEEP_LINES )); then
    continue
  fi

  size_before=$(stat -c%s "$f" 2>/dev/null || echo 0)

  if [[ $DRY_RUN -eq 1 ]]; then
    echo "[DRY] Would trim: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} bytes -> ~unknown)"
    # BUGFIX: the original used ((trimmed_count++)). A (( )) whose value is 0
    # returns status 1, so under `set -e` the script died on the FIRST
    # qualifying file. Plain $((...)) assignments always succeed.
    trimmed_count=$((trimmed_count + 1))
    total_before=$((total_before + size_before))
    continue
  fi

  tmp=$(mktemp)
  # Remove the scratch file even if the script aborts mid-trim.
  trap 'rm -f -- "$tmp"' EXIT
  # Capture the tail first, then truncate-and-append: the inode is preserved,
  # so the writing process keeps logging to the same open file.
  if tail -n "${KEEP_LINES}" "$f" > "$tmp" 2>/dev/null; then
    # Brief sync to reduce race windows before truncation.
    sync
    : > "$f" # truncate in place (keeps inode)
    cat "$tmp" >> "$f"
    rm -f "$tmp"

    size_after=$(stat -c%s "$f" 2>/dev/null || echo 0)
    echo "Trimmed: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} -> ${size_after} bytes)"
    # Same set -e fix as above: never use ((var++)) for counters.
    trimmed_count=$((trimmed_count + 1))
    total_before=$((total_before + size_before))
    total_after=$((total_after + size_after))
  else
    echo "WARN: Failed to read $f; skipping." >&2
    rm -f "$tmp" || true
  fi
done

if [[ $DRY_RUN -eq 1 ]]; then
  echo "DRY summary: files to trim: ${trimmed_count}, total pre-trim size: ${total_before} bytes"
else
  echo "Summary: trimmed ${trimmed_count} files."
  if (( trimmed_count > 0 )); then
    echo "Approx freed: $(( total_before - total_after )) bytes"
  fi
fi
| 1 | #!/usr/bin/env bash |
| 2 | # Trim Kubernetes/Docker container logs in place, keeping last N lines. |
| 3 | # Default: keep last 1000 lines. |
| 4 | |
| 5 | set -euo pipefail |
| 6 | |
| 7 | KEEP_LINES="${KEEP_LINES:-1000}" |
| 8 | DRY_RUN=0 |
| 9 | |
| 10 | # Common log locations |
| 11 | PATHS=( |
| 12 | "/var/lib/docker/containers" # Docker json-file logs |
| 13 | "/var/log/containers" # K8s symlinks |
| 14 | "/var/log/pods" # K8s pod logs |
| 15 | ) |
| 16 | |
| 17 | usage() { |
| 18 | cat <<EOF |
| 19 | Usage: sudo bash $0 [--lines N] [--dry-run] [--path DIR ...] |
| 20 | --lines N Keep last N lines (default: ${KEEP_LINES}) |
| 21 | --dry-run Show what would change, do not modify files |
| 22 | --path DIR Add/override paths to scan (can be repeated) |
| 23 | Env: KEEP_LINES can also be set. |
| 24 | EOF |
| 25 | } |
| 26 | |
| 27 | # Parse args |
| 28 | CUSTOM_PATHS=() |
| 29 | while [[ $# -gt 0 ]]; do |
| 30 | case "$1" in |
| 31 | --lines) |
| 32 | KEEP_LINES="$2"; shift 2;; |
| 33 | --dry-run) |
| 34 | DRY_RUN=1; shift;; |
| 35 | --path) |
| 36 | CUSTOM_PATHS+=("$2"); shift 2;; |
| 37 | -h|--help) |
| 38 | usage; exit 0;; |
| 39 | *) |
| 40 | echo "Unknown arg: $1"; usage; exit 1;; |
| 41 | esac |
| 42 | done |
| 43 | |
| 44 | if [[ ${#CUSTOM_PATHS[@]} -gt 0 ]]; then |
| 45 | PATHS=("${CUSTOM_PATHS[@]}") |
| 46 | fi |
| 47 | |
| 48 | if [[ $EUID -ne 0 ]]; then |
| 49 | echo "Please run as root (sudo)."; exit 1 |
| 50 | fi |
| 51 | |
| 52 | echo "Scanning paths: ${PATHS[*]}" |
| 53 | echo "Keeping last ${KEEP_LINES} lines per log." |
| 54 | [[ $DRY_RUN -eq 1 ]] && echo "DRY RUN mode—no changes will be made." |
| 55 | |
| 56 | # Collect candidate files |
| 57 | mapfile -t FILES < <( |
| 58 | for d in "${PATHS[@]}"; do |
| 59 | [[ -d "$d" ]] || continue |
| 60 | # Typical Docker/K8s log patterns |
| 61 | find "$d" -type f \( -name "*-json.log" -o -name "*.log" \) 2>/dev/null |
| 62 | done | sort -u |
| 63 | ) |
| 64 | |
| 65 | total_before=0 |
| 66 | total_after=0 |
| 67 | trimmed_count=0 |
| 68 | |
| 69 | for f in "${FILES[@]}"; do |
| 70 | [[ -f "$f" ]] || continue |
| 71 | |
| 72 | # Skip tiny files quickly |
| 73 | # (If line count <= KEEP_LINES, skip) |
| 74 | # Note: For speed on huge files, `wc -l` is still fine here; we avoid parsing content. |
| 75 | lines=$(wc -l < "$f" || echo 0) |
| 76 | if (( lines <= KEEP_LINES )); then |
| 77 | continue |
| 78 | fi |
| 79 | |
| 80 | size_before=$(stat -c%s "$f" 2>/dev/null || echo 0) |
| 81 | |
| 82 | if [[ $DRY_RUN -eq 1 ]]; then |
| 83 | echo "[DRY] Would trim: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} bytes -> ~unknown)" |
| 84 | ((trimmed_count++)) |
| 85 | ((total_before+=size_before)) |
| 86 | continue |
| 87 | fi |
| 88 | |
| 89 | tmp=$(mktemp) |
| 90 | # Capture tail first to avoid losing "last chunk" during truncate |
| 91 | # copytruncate approach: we keep inode by truncating then writing back |
| 92 | if tail -n "${KEEP_LINES}" "$f" > "$tmp" 2>/dev/null; then |
| 93 | # Optional: brief sync to reduce race windows |
| 94 | sync |
| 95 | |
| 96 | # Preserve mode/owner; we will truncate & append (same inode) |
| 97 | : > "$f" # truncate in place (keeps inode) |
| 98 | cat "$tmp" >> "$f" |
| 99 | |
| 100 | rm -f "$tmp" |
| 101 | |
| 102 | size_after=$(stat -c%s "$f" 2>/dev/null || echo 0) |
| 103 | echo "Trimmed: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} -> ${size_after} bytes)" |
| 104 | ((trimmed_count++)) |
| 105 | ((total_before+=size_before)) |
| 106 | ((total_after+=size_after)) |
| 107 | else |
| 108 | echo "WARN: Failed to read $f; skipping." |
| 109 | rm -f "$tmp" || true |
| 110 | fi |
| 111 | done |
| 112 | |
| 113 | if [[ $DRY_RUN -eq 1 ]]; then |
| 114 | echo "DRY summary: files to trim: ${trimmed_count}, total pre-trim size: ${total_before} bytes" |
| 115 | else |
| 116 | echo "Summary: trimmed ${trimmed_count} files." |
| 117 | if (( trimmed_count > 0 )); then |
| 118 | echo "Approx freed: $(( total_before - total_after )) bytes" |
| 119 | fi |
| 120 | fi |
| 121 |
logfile_with_pods.sh
· 1.0 KiB · Bash
Ham
#!/bin/bash
# List Docker container log sizes with the Kubernetes container name,
# sorted by human-readable size (smallest first).

# Header now matches the two columns actually printed below (the original
# advertised 5 columns but emitted only SIZE and CONTAINER_NAME).
echo -e "SIZE\tCONTAINER_NAME"

# NUL-delimited find/read so unusual paths cannot word-split.
sudo find /var/lib/docker/containers/ -type f -name "*.log" -print0 |
while IFS= read -r -d '' logfile; do
  # Human-readable size of the log file.
  size=$(sudo du -h -- "$logfile" | awk '{print $1}')

  # Log files are named <container-id>-json.log; the id is the first '-' field.
  container_id=$(basename -- "$logfile" | cut -d'-' -f1)

  # Read the K8s container-name label via a Go template instead of grepping
  # raw JSON (grep/cut breaks on formatting changes). Empty if not found.
  container_name=$(docker inspect --format '{{ index .Config.Labels "io.kubernetes.container.name" }}' "$container_id" 2>/dev/null)

  echo -e "$size\t$container_name"
done | sort -h
| 1 | #!/bin/bash |
| 2 | |
| 3 | echo -e "SIZE\tPOD_NAME\tNAMESPACE\tCONTAINER_NAME\tLOG_PATH" |
| 4 | |
| 5 | # Step 1: Find all container logs and sort by size |
| 6 | sudo find /var/lib/docker/containers/ -type f -name "*.log" | while read -r logfile; do |
| 7 | # echo -e "$logfile" |
| 8 | # Get file size |
| 9 | size=$(sudo du -h "$logfile" | awk '{print $1}') |
| 10 | #echo -e "$size\t$logfile" |
| 11 | # Get container ID from filename or path |
| 12 | container_id=$(basename "$logfile" | cut -d'-' -f1) |
| 13 | #echo -e "$size\t$container_id" |
| 14 | |
| 15 | # Use docker inspect to get Kubernetes labels |
| 16 | #pod_name=$(docker inspect "$container_id" 2>/dev/null | grep '"io.kubernetes.pod.name"' | cut -d':' -f2 | tr -d ' ",') |
| 17 | #namespace=$(docker inspect "$container_id" 2>/dev/null | grep '"io.kubernetes.pod.namespace"' | cut -d':' -f2 | tr -d ' ",') |
| 18 | container_name=$(docker inspect "$container_id" 2>/dev/null | grep '"io.kubernetes.container.name"' | cut -d':' -f2 | tr -d ' ",') |
| 19 | |
| 20 | # Print results |
| 21 | #echo -e "$size\t$pod_name\t$namespace\t$container_name\t$logfile" |
| 22 | echo -e "$size\t$container_name" |
| 23 | done | sort -h |
| 24 |