#!/usr/bin/env bash
# Trim Kubernetes/Docker container logs in place, keeping last N lines.
# Default: keep last 1000 lines (override with KEEP_LINES env var or -k).
set -euo pipefail

KEEP_LINES="${KEEP_LINES:-1000}"
DRY_RUN=0

# Common log locations.
PATHS=(
  "/var/lib/docker/containers"   # Docker json-file logs
  "/var/log/containers"          # K8s symlinks
  "/var/log/pods"                # K8s pod logs
)

usage() {
  cat <<EOF
Usage: ${0##*/} [-n] [-k KEEP_LINES]
Trim container log files in place, keeping the last KEEP_LINES lines.
  -n    dry run: report what would be trimmed, change nothing
  -k N  keep last N lines (default: ${KEEP_LINES})
EOF
  exit 2
}

#######################################
# Trim one file in place, keeping the last KEEP_LINES lines.
# The file is truncated and rewritten rather than replaced, so the
# inode is preserved (important: the container runtime keeps the file
# open and would otherwise keep writing to the old, deleted inode).
# Globals:   KEEP_LINES (read)
# Arguments: $1 - path to the log file
# Returns:   0 on success, 1 if the file could not be read or written
#######################################
trim_file() {
  local f=$1 tmp
  tmp=$(mktemp)
  # Capture the tail first so the "last chunk" is not lost between
  # truncate and rewrite.
  if tail -n "${KEEP_LINES}" -- "$f" > "$tmp" 2>/dev/null; then
    # Brief sync to reduce the race window with active writers.
    sync
    # Guard the truncate: an unwritable file must not abort the whole
    # run under `set -e` — skip it and report instead.
    if ! : > "$f" 2>/dev/null; then
      rm -f -- "$tmp"
      return 1
    fi
    cat -- "$tmp" >> "$f"
    rm -f -- "$tmp"
    return 0
  fi
  rm -f -- "$tmp" || true
  return 1
}

main() {
  local opt
  while getopts ':nk:h' opt; do
    case "$opt" in
      n) DRY_RUN=1 ;;
      k) KEEP_LINES=$OPTARG ;;
      h) usage ;;
      *) usage ;;
    esac
  done
  shift $((OPTIND - 1))

  # Collect candidate *.log files from the known locations, de-duplicated.
  # mapfile (not FILES=( $(...) )) so paths containing spaces survive.
  local -a FILES=()
  local p
  mapfile -t FILES < <(
    for p in "${PATHS[@]}"; do
      [[ -d "$p" ]] || continue
      find "$p" -type f -name '*.log' 2>/dev/null
    done | sort -u
  )

  local total_before=0 total_after=0 trimmed_count=0
  local f lines size_before size_after
  for f in "${FILES[@]}"; do
    [[ -f "$f" ]] || continue

    # Skip files that are already short enough. `wc -l` is fine even on
    # huge files; we never parse content here.
    lines=$(wc -l < "$f" || echo 0)
    if (( lines <= KEEP_LINES )); then
      continue
    fi

    size_before=$(stat -c%s "$f" 2>/dev/null || echo 0)

    if (( DRY_RUN )); then
      echo "[DRY] Would trim: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} bytes -> ~unknown)"
      # NB: var=$((var+1)), never ((var++)): the latter returns status 1
      # when the pre-increment value is 0 and would trip `set -e`.
      trimmed_count=$((trimmed_count + 1))
      total_before=$((total_before + size_before))
      continue
    fi

    if trim_file "$f"; then
      size_after=$(stat -c%s "$f" 2>/dev/null || echo 0)
      echo "Trimmed: $f (lines: ${lines} -> ${KEEP_LINES}, size: ${size_before} -> ${size_after} bytes)"
      trimmed_count=$((trimmed_count + 1))
      total_before=$((total_before + size_before))
      total_after=$((total_after + size_after))
    else
      echo "WARN: Failed to read $f; skipping." >&2
    fi
  done

  if (( DRY_RUN )); then
    echo "DRY summary: files to trim: ${trimmed_count}, total pre-trim size: ${total_before} bytes"
  else
    echo "Summary: trimmed ${trimmed_count} files."
    if (( trimmed_count > 0 )); then
      echo "Approx freed: $(( total_before - total_after )) bytes"
    fi
  fi
}

main "$@"