yaisme revised this gist . Go to revision
1 file changed, 17 insertions, 111 deletions
k8s-trim-logs.sh
| @@ -1,120 +1,26 @@ | |||
| 1 | 1 | #!/usr/bin/env bash | |
| 2 | - | # Trim Kubernetes/Docker container logs in place, keeping last N lines. | |
| 3 | - | # Default: keep last 1000 lines. | |
| 2 | + | # Trim all Docker/K8S container logs to last 1000 lines | |
| 4 | 3 | ||
| 5 | - | set -euo pipefail | |
| 4 | + | KEEP=1000 | |
| 6 | 5 | ||
| 7 | - | KEEP_LINES="${KEEP_LINES:-1000}" | |
| 8 | - | DRY_RUN=0 | |
| 9 | - | ||
| 10 | - | # Common log locations | |
| 11 | - | PATHS=( | |
| 12 | - | "/var/lib/docker/containers" # Docker json-file logs | |
| 13 | - | "/var/log/containers" # K8s symlinks | |
| 14 | - | "/var/log/pods" # K8s pod logs | |
| 15 | - | ) | |
| 16 | - | ||
| 17 | - | usage() { | |
| 18 | - | cat <<EOF | |
| 19 | - | Usage: sudo bash $0 [--lines N] [--dry-run] [--path DIR ...] | |
| 20 | - | --lines N Keep last N lines (default: ${KEEP_LINES}) | |
| 21 | - | --dry-run Show what would change, do not modify files | |
| 22 | - | --path DIR Add/override paths to scan (can be repeated) | |
| 23 | - | Env: KEEP_LINES can also be set. | |
| 24 | - | EOF | |
| 25 | - | } | |
| 26 | - | ||
| 27 | - | # Parse args | |
| 28 | - | CUSTOM_PATHS=() | |
| 29 | - | while [[ $# -gt 0 ]]; do | |
| 30 | - | case "$1" in | |
| 31 | - | --lines) | |
| 32 | - | KEEP_LINES="$2"; shift 2;; | |
| 33 | - | --dry-run) | |
| 34 | - | DRY_RUN=1; shift;; | |
| 35 | - | --path) | |
| 36 | - | CUSTOM_PATHS+=("$2"); shift 2;; | |
| 37 | - | -h|--help) | |
| 38 | - | usage; exit 0;; | |
| 39 | - | *) | |
| 40 | - | echo "Unknown arg: $1"; usage; exit 1;; | |
| 41 | - | esac | |
| 42 | - | done | |
| 43 | - | ||
| 44 | - | if [[ ${#CUSTOM_PATHS[@]} -gt 0 ]]; then | |
| 45 | - | PATHS=("${CUSTOM_PATHS[@]}") | |
| 46 | - | fi | |
| 47 | - | ||
| 48 | - | if [[ $EUID -ne 0 ]]; then | |
| 49 | - | echo "Please run as root (sudo)."; exit 1 | |
| 50 | - | fi | |
| 51 | - | ||
| 52 | - | echo "Scanning paths: ${PATHS[*]}" | |
| 53 | - | echo "Keeping last ${KEEP_LINES} lines per log." | |
| 54 | - | [[ $DRY_RUN -eq 1 ]] && echo "DRY RUN mode—no changes will be made." | |
| 55 | - | ||
| 56 | - | # Collect candidate files | |
| 57 | - | mapfile -t FILES < <( | |
| 58 | - | for d in "${PATHS[@]}"; do | |
| 59 | - | [[ -d "$d" ]] || continue | |
| 60 | - | # Typical Docker/K8s log patterns | |
| 61 | - | find "$d" -type f \( -name "*-json.log" -o -name "*.log" \) 2>/dev/null | |
| 62 | - | done | sort -u | |
| 63 | - | ) | |
| 64 | - | ||
| 65 | - | total_before=0 | |
| 66 | - | total_after=0 | |
| 67 | - | trimmed_count=0 | |
| 68 | - | ||
| 69 | - | for f in "${FILES[@]}"; do | |
| 70 | - | [[ -f "$f" ]] || continue | |
| 71 | - | ||
| 72 | - | # Skip tiny files quickly | |
| 73 | - | # (If line count <= KEEP_LINES, skip) | |
| 74 | - | # Note: For speed on huge files, `wc -l` is still fine here; we avoid parsing content. | |
| 75 | - | lines=$(wc -l < "$f" || echo 0) | |
| 76 | - | if (( lines <= KEEP_LINES )); then | |
| 77 | - | continue | |
| 78 | - | fi | |
| 79 | - | ||
| 80 | - | size_before=$(stat -c%s "$f" 2>/dev/null || echo 0) | |
| 81 | - | ||
| 82 | - | if [[ $DRY_RUN -eq 1 ]]; then | |
| 83 | - | echo "[DRY] Would trim: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} bytes -> ~unknown)" | |
| 84 | - | ((trimmed_count++)) | |
| 85 | - | ((total_before+=size_before)) | |
| 86 | - | continue | |
| 6 | + | # Look for all docker json logs | |
| 7 | + | for f in /var/lib/docker/containers/*/*-json.log; do | |
| 8 | + | if [ -f "$f" ]; then | |
| 9 | + | echo "Trimming $f ..." | |
| 10 | + | tmp=$(mktemp) | |
| 11 | + | tail -n $KEEP "$f" > "$tmp" && cat "$tmp" > "$f" | |
| 12 | + | rm -f "$tmp" | |
| 87 | 13 | fi | |
| 14 | + | done | |
| 88 | 15 | ||
| 89 | - | tmp=$(mktemp) | |
| 90 | - | # Capture tail first to avoid losing "last chunk" during truncate | |
| 91 | - | # copytruncate approach: we keep inode by truncating then writing back | |
| 92 | - | if tail -n "${KEEP_LINES}" "$f" > "$tmp" 2>/dev/null; then | |
| 93 | - | # Optional: brief sync to reduce race windows | |
| 94 | - | sync | |
| 95 | - | ||
| 96 | - | # Preserve mode/owner; we will truncate & append (same inode) | |
| 97 | - | : > "$f" # truncate in place (keeps inode) | |
| 98 | - | cat "$tmp" >> "$f" | |
| 99 | - | ||
| 16 | + | # Also check kubelet-managed logs (optional) | |
| 17 | + | for f in /var/log/containers/*.log /var/log/pods/*/*/*.log; do | |
| 18 | + | if [ -f "$f" ]; then | |
| 19 | + | echo "Trimming $f ..." | |
| 20 | + | tmp=$(mktemp) | |
| 21 | + | tail -n $KEEP "$f" > "$tmp" && cat "$tmp" > "$f" | |
| 100 | 22 | rm -f "$tmp" | |
| 101 | - | ||
| 102 | - | size_after=$(stat -c%s "$f" 2>/dev/null || echo 0) | |
| 103 | - | echo "Trimmed: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} -> ${size_after} bytes)" | |
| 104 | - | ((trimmed_count++)) | |
| 105 | - | ((total_before+=size_before)) | |
| 106 | - | ((total_after+=size_after)) | |
| 107 | - | else | |
| 108 | - | echo "WARN: Failed to read $f; skipping." | |
| 109 | - | rm -f "$tmp" || true | |
| 110 | 23 | fi | |
| 111 | 24 | done | |
| 112 | 25 | ||
| 113 | - | if [[ $DRY_RUN -eq 1 ]]; then | |
| 114 | - | echo "DRY summary: files to trim: ${trimmed_count}, total pre-trim size: ${total_before} bytes" | |
| 115 | - | else | |
| 116 | - | echo "Summary: trimmed ${trimmed_count} files." | |
| 117 | - | if (( trimmed_count > 0 )); then | |
| 118 | - | echo "Approx freed: $(( total_before - total_after )) bytes" | |
| 119 | - | fi | |
| 120 | - | fi | |
| 26 | + | echo "Done." | |
yaisme revised this gist . Go to revision
1 file changed, 120 insertions
k8s-trim-logs.sh(file created)
| @@ -0,0 +1,120 @@ | |||
| 1 | + | #!/usr/bin/env bash | |
| 2 | + | # Trim Kubernetes/Docker container logs in place, keeping last N lines. | |
| 3 | + | # Default: keep last 1000 lines. | |
| 4 | + | ||
| 5 | + | set -euo pipefail | |
| 6 | + | ||
| 7 | + | KEEP_LINES="${KEEP_LINES:-1000}" | |
| 8 | + | DRY_RUN=0 | |
| 9 | + | ||
| 10 | + | # Common log locations | |
| 11 | + | PATHS=( | |
| 12 | + | "/var/lib/docker/containers" # Docker json-file logs | |
| 13 | + | "/var/log/containers" # K8s symlinks | |
| 14 | + | "/var/log/pods" # K8s pod logs | |
| 15 | + | ) | |
| 16 | + | ||
| 17 | + | usage() { | |
| 18 | + | cat <<EOF | |
| 19 | + | Usage: sudo bash $0 [--lines N] [--dry-run] [--path DIR ...] | |
| 20 | + | --lines N Keep last N lines (default: ${KEEP_LINES}) | |
| 21 | + | --dry-run Show what would change, do not modify files | |
| 22 | + | --path DIR Add/override paths to scan (can be repeated) | |
| 23 | + | Env: KEEP_LINES can also be set. | |
| 24 | + | EOF | |
| 25 | + | } | |
| 26 | + | ||
| 27 | + | # Parse args | |
| 28 | + | CUSTOM_PATHS=() | |
| 29 | + | while [[ $# -gt 0 ]]; do | |
| 30 | + | case "$1" in | |
| 31 | + | --lines) | |
| 32 | + | KEEP_LINES="$2"; shift 2;; | |
| 33 | + | --dry-run) | |
| 34 | + | DRY_RUN=1; shift;; | |
| 35 | + | --path) | |
| 36 | + | CUSTOM_PATHS+=("$2"); shift 2;; | |
| 37 | + | -h|--help) | |
| 38 | + | usage; exit 0;; | |
| 39 | + | *) | |
| 40 | + | echo "Unknown arg: $1"; usage; exit 1;; | |
| 41 | + | esac | |
| 42 | + | done | |
| 43 | + | ||
| 44 | + | if [[ ${#CUSTOM_PATHS[@]} -gt 0 ]]; then | |
| 45 | + | PATHS=("${CUSTOM_PATHS[@]}") | |
| 46 | + | fi | |
| 47 | + | ||
| 48 | + | if [[ $EUID -ne 0 ]]; then | |
| 49 | + | echo "Please run as root (sudo)."; exit 1 | |
| 50 | + | fi | |
| 51 | + | ||
| 52 | + | echo "Scanning paths: ${PATHS[*]}" | |
| 53 | + | echo "Keeping last ${KEEP_LINES} lines per log." | |
| 54 | + | [[ $DRY_RUN -eq 1 ]] && echo "DRY RUN mode—no changes will be made." | |
| 55 | + | ||
| 56 | + | # Collect candidate files | |
| 57 | + | mapfile -t FILES < <( | |
| 58 | + | for d in "${PATHS[@]}"; do | |
| 59 | + | [[ -d "$d" ]] || continue | |
| 60 | + | # Typical Docker/K8s log patterns | |
| 61 | + | find "$d" -type f \( -name "*-json.log" -o -name "*.log" \) 2>/dev/null | |
| 62 | + | done | sort -u | |
| 63 | + | ) | |
| 64 | + | ||
| 65 | + | total_before=0 | |
| 66 | + | total_after=0 | |
| 67 | + | trimmed_count=0 | |
| 68 | + | ||
| 69 | + | for f in "${FILES[@]}"; do | |
| 70 | + | [[ -f "$f" ]] || continue | |
| 71 | + | ||
| 72 | + | # Skip tiny files quickly | |
| 73 | + | # (If line count <= KEEP_LINES, skip) | |
| 74 | + | # Note: For speed on huge files, `wc -l` is still fine here; we avoid parsing content. | |
| 75 | + | lines=$(wc -l < "$f" || echo 0) | |
| 76 | + | if (( lines <= KEEP_LINES )); then | |
| 77 | + | continue | |
| 78 | + | fi | |
| 79 | + | ||
| 80 | + | size_before=$(stat -c%s "$f" 2>/dev/null || echo 0) | |
| 81 | + | ||
| 82 | + | if [[ $DRY_RUN -eq 1 ]]; then | |
| 83 | + | echo "[DRY] Would trim: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} bytes -> ~unknown)" | |
| 84 | + | ((trimmed_count++)) | |
| 85 | + | ((total_before+=size_before)) | |
| 86 | + | continue | |
| 87 | + | fi | |
| 88 | + | ||
| 89 | + | tmp=$(mktemp) | |
| 90 | + | # Capture tail first to avoid losing "last chunk" during truncate | |
| 91 | + | # copytruncate approach: we keep inode by truncating then writing back | |
| 92 | + | if tail -n "${KEEP_LINES}" "$f" > "$tmp" 2>/dev/null; then | |
| 93 | + | # Optional: brief sync to reduce race windows | |
| 94 | + | sync | |
| 95 | + | ||
| 96 | + | # Preserve mode/owner; we will truncate & append (same inode) | |
| 97 | + | : > "$f" # truncate in place (keeps inode) | |
| 98 | + | cat "$tmp" >> "$f" | |
| 99 | + | ||
| 100 | + | rm -f "$tmp" | |
| 101 | + | ||
| 102 | + | size_after=$(stat -c%s "$f" 2>/dev/null || echo 0) | |
| 103 | + | echo "Trimmed: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} -> ${size_after} bytes)" | |
| 104 | + | ((trimmed_count++)) | |
| 105 | + | ((total_before+=size_before)) | |
| 106 | + | ((total_after+=size_after)) | |
| 107 | + | else | |
| 108 | + | echo "WARN: Failed to read $f; skipping." | |
| 109 | + | rm -f "$tmp" || true | |
| 110 | + | fi | |
| 111 | + | done | |
| 112 | + | ||
| 113 | + | if [[ $DRY_RUN -eq 1 ]]; then | |
| 114 | + | echo "DRY summary: files to trim: ${trimmed_count}, total pre-trim size: ${total_before} bytes" | |
| 115 | + | else | |
| 116 | + | echo "Summary: trimmed ${trimmed_count} files." | |
| 117 | + | if (( trimmed_count > 0 )); then | |
| 118 | + | echo "Approx freed: $(( total_before - total_after )) bytes" | |
| 119 | + | fi | |
| 120 | + | fi | |
yaisme revised this gist . Go to revision
1 file changed, 1 insertion
List LSOF and sort (file created)
| @@ -0,0 +1 @@ | |||
| 1 | + | sudo lsof +L1 | awk 'NR==1{print "COMMAND", "FD", "SIZE_MB", "NAME"} NR>1{print $1, $4, $7, $10}' | numfmt --to-unit=1M --field=3 --format="%.2f MB" --header | sort -k3 -nr | column -t | |
yaisme revised this gist . Go to revision
1 file changed, 39 insertions
docker-deleted-log-report.sh(file created)
| @@ -0,0 +1,39 @@ | |||
| 1 | + | #!/bin/bash | |
| 2 | + | ||
| 3 | + | TMPFILE=$(mktemp) | |
| 4 | + | ||
| 5 | + | # Get all deleted Docker logs that are still open | |
| 6 | + | sudo lsof +L1 | awk ' | |
| 7 | + | NR == 1 { next } | |
| 8 | + | $NF ~ /\(deleted\)$/ && $0 ~ /\/var\/lib\/docker\/containers\/.*-json\.log/ { print } | |
| 9 | + | ' | while read -r line; do | |
| 10 | + | PROCESS=$(echo "$line" | awk '{print $1}') | |
| 11 | + | SIZE=$(echo "$line" | awk '{print $(NF-4)}') | |
| 12 | + | FILE=$(echo "$line" | awk '{for (i=9; i<=NF; i++) printf $i " "; print ""}' | sed 's/ (deleted)//') | |
| 13 | + | ||
| 14 | + | # Skip if SIZE is not numeric | |
| 15 | + | if ! [[ "$SIZE" =~ ^[0-9]+$ ]]; then | |
| 16 | + | continue | |
| 17 | + | fi | |
| 18 | + | ||
| 19 | + | SIZE_MB=$(awk -v s="$SIZE" 'BEGIN {printf "%.2f", s/1048576}') | |
| 20 | + | CONTAINER_ID=$(basename "$(dirname "$FILE")") | |
| 21 | + | ||
| 22 | + | # Get container metadata | |
| 23 | + | INFO=$(docker ps -a --no-trunc --filter "id=$CONTAINER_ID" --format "{{.ID}}|{{.Image}}|{{.Command}}|{{.RunningFor}}|{{.Names}}") | |
| 24 | + | if [[ -z "$INFO" ]]; then | |
| 25 | + | ID_SHORT="N/A"; IMAGE="N/A"; CMD="N/A"; CREATED="N/A"; NAMES="N/A" | |
| 26 | + | else | |
| 27 | + | IFS='|' read -r ID IMAGE_RAW CMD CREATED NAMES <<< "$INFO" | |
| 28 | + | ID_SHORT=$(echo "$ID" | cut -c1-12) | |
| 29 | + | IMAGE=$(echo "$IMAGE_RAW" | sed 's|.*/||' | cut -d':' -f1) | |
| 30 | + | fi | |
| 31 | + | ||
| 32 | + | echo -e "$SIZE_MB MB\t$PROCESS\t$ID_SHORT\t$CREATED\t$NAMES" >> "$TMPFILE" | |
| 33 | + | done | |
| 34 | + | ||
| 35 | + | # Output | |
| 36 | + | #echo -e "Proc\tImage\tSize\tContainer ID\tImage\tCommand\tCreated\tNames" | |
| 37 | + | sort -nk1 "$TMPFILE" | |
| 38 | + | ||
| 39 | + | rm -f "$TMPFILE" | |
yaisme revised this gist . Go to revision
1 file changed, 23 insertions
logfile_with_pods.sh(file created)
| @@ -0,0 +1,23 @@ | |||
| 1 | + | #!/bin/bash | |
| 2 | + | ||
| 3 | + | echo -e "SIZE\tPOD_NAME\tNAMESPACE\tCONTAINER_NAME\tLOG_PATH" | |
| 4 | + | ||
| 5 | + | # Step 1: Find all container logs and sort by size | |
| 6 | + | sudo find /var/lib/docker/containers/ -type f -name "*.log" | while read -r logfile; do | |
| 7 | + | # echo -e "$logfile" | |
| 8 | + | # Get file size | |
| 9 | + | size=$(sudo du -h "$logfile" | awk '{print $1}') | |
| 10 | + | #echo -e "$size\t$logfile" | |
| 11 | + | # Get container ID from filename or path | |
| 12 | + | container_id=$(basename "$logfile" | cut -d'-' -f1) | |
| 13 | + | #echo -e "$size\t$container_id" | |
| 14 | + | ||
| 15 | + | # Use docker inspect to get Kubernetes labels | |
| 16 | + | #pod_name=$(docker inspect "$container_id" 2>/dev/null | grep '"io.kubernetes.pod.name"' | cut -d':' -f2 | tr -d ' ",') | |
| 17 | + | #namespace=$(docker inspect "$container_id" 2>/dev/null | grep '"io.kubernetes.pod.namespace"' | cut -d':' -f2 | tr -d ' ",') | |
| 18 | + | container_name=$(docker inspect "$container_id" 2>/dev/null | grep '"io.kubernetes.container.name"' | cut -d':' -f2 | tr -d ' ",') | |
| 19 | + | ||
| 20 | + | # Print results | |
| 21 | + | #echo -e "$size\t$pod_name\t$namespace\t$container_name\t$logfile" | |
| 22 | + | echo -e "$size\t$container_name" | |
| 23 | + | done | sort -h | |