# allcodes.py — concatenation of the project's source files.
# (The stray "allcodes"/"py" lines here were Markdown code-fence residue
#  from the extraction; kept only as this comment so the module parses.)
import os
import re
from typing import Callable, Dict, Any, Optional
import logging
logger = logging.getLogger(__name__)
def safe_read_file(path: str, read_bytes: bool = False, max_size: int = 1024) -> Optional[Any]:
    """Safely read up to ``max_size`` characters/bytes from a file.

    Args:
        path: Path of the file to read.
        read_bytes: When True, open in binary mode and return ``bytes``;
            otherwise open in text mode and return ``str``.
        max_size: Maximum number of characters (text mode) or bytes
            (binary mode) to read; longer files are silently truncated.

    Returns:
        The file's leading content, or ``None`` when the file cannot be
        opened/read or (in text mode) cannot be decoded.
    """
    try:
        mode = 'rb' if read_bytes else 'r'
        with open(path, mode) as f:
            return f.read(max_size)
    except (OSError, UnicodeDecodeError) as e:
        # UnicodeDecodeError must be caught too: a text-mode read of a binary
        # file would otherwise crash this "safe" reader. (IOError is an alias
        # of OSError on Python 3, so OSError covers both.)
        logging.getLogger(__name__).debug(f"Cannot read file {path}: {str(e)}")
        return None
# Rule definitions
# Each framework maps rule IDs to metadata consumed by the scanner: a human
# readable description, a check callable, a remediation hint, and a severity
# label.
# NOTE(review): the check_* callables referenced below are not defined in this
# view — presumably earlier in compliance_rules.py. Confirm they are in scope
# before these dicts are evaluated, otherwise module import raises NameError.
GDPR_RULES = {
"DATA_ENCRYPTION": {
"description": "Unencrypted sensitive data detected",
"check": check_data_encryption,
"recommendation": "Encrypt using AES-256",
"severity": "High"
},
"ACCESS_CONTROL": {
"description": "Insecure file permissions",
"check": check_access_control,
"recommendation": "Set permissions to 600",
"severity": "Medium"
}
}
# HIPAA: protected health information (PHI) rules.
HIPAA_RULES = {
"PHI_PROTECTION": {
"description": "PHI data exposure",
"check": check_phi_protection,
"recommendation": "Remove/encrypt PHI",
"severity": "Critical"
}
}
# PCI-DSS: payment card data rules.
PCI_DSS_RULES = {
"CARDHOLDER_DATA": {
"description": "Cardholder data in plaintext",
"check": check_cardholder_data,
"recommendation": "Encrypt data",
"severity": "High"
}
}
# NIST: password-policy rules.
NIST_RULES = {
"PASSWORD_COMPLEXITY": {
"description": "Weak password policy",
"check": check_password_complexity,
"recommendation": "Enforce NIST guidelines",
"severity": "Medium"
}
}
#file_tracker.py
import os
import json
import time
from typing import List, Dict
class FileTracker:
    """Tracks file modification times so scans can be run incrementally.

    State is persisted as a JSON file (``file_cache.json``) inside the
    supplied cache directory.
    """

    def __init__(self, cache_dir: str):
        # Where the JSON cache lives; created on first use.
        self.cache_dir = cache_dir
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir, exist_ok=True)
        self.cache_file = os.path.join(self.cache_dir, "file_cache.json")
        # Maps file path -> last-seen modification time (epoch seconds).
        self.file_cache: Dict[str, float] = self._load_cache()
#helper_utils.py
import os
import sys
import platform
import re
import json
import logging
from typing import Dict, List, Optional, Any, Tuple
logger = logging.getLogger(__name__)
return text
try:
with open(requirements_file, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
return packages
try:
if os.path.exists(config_file):
with open(config_file, "r") as f:
config = json.load(f)
else:
logger.warning(f"Config file not found: {config_file}")
except Exception as e:
logger.error(f"Error loading config file: {str(e)}")
return config
return 'Unknown'
#password_checker.py
import os
import re
import logging
from typing import List, Dict, Any
logger = logging.getLogger(__name__)
class PasswordChecker:
    """Detects weak or exposed credentials in files under a directory."""

    def __init__(self, directory: str):
        # Root directory whose files will be inspected for credentials.
        self.directory = directory
try:
# Skip binary files and large files
if os.path.getsize(file_path) > 10 * 1024 * 1024: # 10MB
logger.debug(f"Skipping large file: {file_path}")
return []
if not self._is_strong_password(password):
sensitive_info.append({
"file": file_path,
"line": line_num,
"type": credential_type,
"value": password[:3] + "***" + password[-2:] if
len(password) > 5 else "***",
# Mask most of the password
"is_strong": False
})
except Exception as e:
logger.error(f"Error processing file {file_path}: {str(e)}")
return sensitive_info
if not os.path.exists(self.directory):
logger.error(f"Directory not found: {self.directory}")
return [{"error": f"Directory '{self.directory}' not found"}]
try:
# First pass: check known password files
for root, _, files in os.walk(self.directory):
for file in files:
file_path = os.path.join(root, file)
if self._is_password_file(file_path):
logger.debug(f"Checking password file: {file_path}")
sensitive_entries = self._detect_sensitive_info(file_path)
except Exception as e:
logger.error(f"Error during password check: {str(e)}")
issues.append({"error": f"Error during password check: {str(e)}"})
#permission_checker.py
import os
import re
import logging
from typing import List, Dict, Any
logger = logging.getLogger(__name__)
class PermissionChecker:
    """Checks file and directory permissions for compliance violations.

    NOTE(review): this module previously contained a copy-paste duplicate of
    ``PasswordChecker`` from password_checker.py, while main.py does
    ``from permission_checker import PermissionChecker`` and constructs it as
    ``PermissionChecker(scan_path, args.framework)``. The class is renamed and
    its signature extended to match that call site.
    """

    def __init__(self, directory: str, framework: str = "GDPR"):
        # Root directory whose file/directory permissions are inspected.
        self.directory = directory
        # Compliance framework whose permission rules apply (default matches
        # the CLI's default framework).
        self.framework = framework
try:
# Skip binary files and large files
if os.path.getsize(file_path) > 10 * 1024 * 1024: # 10MB
logger.debug(f"Skipping large file: {file_path}")
return []
if not self._is_strong_password(password):
sensitive_info.append({
"file": file_path,
"line": line_num,
"type": credential_type,
"value": password[:3] + "***" + password[-2:] if
len(password) > 5 else "***",
# Mask most of the password
"is_strong": False
})
except Exception as e:
logger.error(f"Error processing file {file_path}: {str(e)}")
return sensitive_info
if not os.path.exists(self.directory):
logger.error(f"Directory not found: {self.directory}")
return [{"error": f"Directory '{self.directory}' not found"}]
try:
# First pass: check known password files
for root, _, files in os.walk(self.directory):
for file in files:
file_path = os.path.join(root, file)
if self._is_password_file(file_path):
logger.debug(f"Checking password file: {file_path}")
sensitive_entries = self._detect_sensitive_info(file_path)
except Exception as e:
logger.error(f"Error during password check: {str(e)}")
issues.append({"error": f"Error during password check: {str(e)}"})
#import csv
import json
import os
from typing import Dict, List, Optional
from reportlab.lib.pagesizes import letter
from reportlab.lib import colors
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
from reportlab.lib.styles import getSampleStyleSheet
from colorama import Fore, Style
from helper_utils import sanitize_output
class ReportGenerator:
    """Renders compliance issues as CSV, JSON, PDF, or colored text reports."""

    @staticmethod
    def _ensure_dir(output_file: str):
        """Ensure the output file's parent directory exists."""
        if output_file:
            directory = os.path.dirname(output_file)
            # A bare filename has no directory component; os.makedirs("")
            # would raise FileNotFoundError, so only create when non-empty.
            if directory:
                os.makedirs(directory, exist_ok=True)

    @staticmethod
    def _get_rule(item: Dict) -> str:
        """Get the rule name from an issue item."""
        return item.get("rule", "Unknown Rule")

    @staticmethod
    def _get_severity(issue: str) -> str:
        """Categorize an issue title as Critical, Moderate, or Low."""
        critical_issues = ["Encryption Failure", "Weak Password", "Unpatched Vulnerability"]
        moderate_issues = ["Access Control Violation", "Outdated Software"]
        return "Critical" if issue in critical_issues else "Moderate" if issue in moderate_issues else "Low"

    @staticmethod
    def generate_csv(issues: List[Dict], output_file: str):
        """Generate a CSV report and write it to ``output_file``."""
        ReportGenerator._ensure_dir(output_file)
        with open(output_file, "w", newline="", encoding="utf-8") as file:
            file.write(ReportGenerator.generate_csv_string(issues))

    @staticmethod
    def generate_csv_string(issues: List[Dict]) -> str:
        """Generate CSV content as a string."""
        # Local import: the module-level "import csv" was commented out, which
        # made this method raise NameError at call time.
        import csv
        from io import StringIO
        output = StringIO()
        writer = csv.writer(output)
        writer.writerow(["Type", "Item", "Issue", "Severity", "Rule",
                         "Recommendation", "Details"])
        for item in issues:
            details = item.get("details", "N/A")
            severity = ReportGenerator._get_severity(item.get("issue", "Unknown Issue"))
            writer.writerow([
                "Software" if "package" in item else "File",
                item.get("package", item.get("file", "N/A")),
                item.get("issue", "Unknown Issue"),
                severity,
                ReportGenerator._get_rule(item),
                item.get("recommendation", "N/A"),
                details
            ])
        return output.getvalue()

    @staticmethod
    def generate_json(issues: List[Dict], output_file: str):
        """Generate a JSON report and write it to ``output_file``."""
        ReportGenerator._ensure_dir(output_file)
        with open(output_file, "w", encoding="utf-8") as file:
            file.write(ReportGenerator.generate_json_string(issues))

    @staticmethod
    def generate_json_string(issues: List[Dict]) -> str:
        """Generate JSON content (issues plus per-severity summary) as a string."""
        report_data = {
            "issues": issues,
            "summary": {
                "total_issues": len(issues),
                "critical_issues": sum(
                    1 for i in issues if
                    ReportGenerator._get_severity(i.get("issue")) == "Critical"),
                "moderate_issues": sum(
                    1 for i in issues if
                    ReportGenerator._get_severity(i.get("issue")) == "Moderate"),
                "low_issues": sum(1 for i in issues if
                                  ReportGenerator._get_severity(i.get("issue")) == "Low")
            }
        }
        return json.dumps(report_data, indent=4)

    @staticmethod
    def generate_pdf(issues: List[Dict], output_file: str):
        """Generate a PDF report (title, summary line, issue table)."""
        ReportGenerator._ensure_dir(output_file)
        doc = SimpleDocTemplate(output_file, pagesize=letter)
        styles = getSampleStyleSheet()
        story = []
        # Title
        story.append(Paragraph("Compliance Report", styles["Title"]))
        # Summary
        summary_text = f"Total Issues Found: {len(issues)}"
        story.append(Paragraph(summary_text, styles["BodyText"]))
        # Table data
        table_data = [["Type", "Item", "Issue", "Severity", "Rule",
                       "Recommendation", "Details"]]
        for item in issues:
            details = item.get("details", "N/A")
            severity = ReportGenerator._get_severity(item.get("issue", "Unknown Issue"))
            table_data.append([
                "Software" if "package" in item else "File",
                item.get("package", item.get("file", "N/A")),
                item.get("issue", "Unknown Issue"),
                severity,
                ReportGenerator._get_rule(item),
                item.get("recommendation", "N/A"),
                details
            ])
        # Table style
        table = Table(table_data)
        table.setStyle(TableStyle([
            ("BACKGROUND", (0, 0), (-1, 0), colors.HexColor("#003366")),
            ("TEXTCOLOR", (0, 0), (-1, 0), colors.whitesmoke),
            ("ALIGN", (0, 0), (-1, -1), "CENTER"),
            ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
            ("FONTSIZE", (0, 0), (-1, 0), 10),
            ("BOTTOMPADDING", (0, 0), (-1, 0), 12),
            ("BACKGROUND", (0, 1), (-1, -1), colors.HexColor("#F0F8FF")),
            ("GRID", (0, 0), (-1, -1), 1, colors.black),
            # NOTE(review): "WORDWRAP" is not a documented reportlab
            # TableStyle command — confirm reportlab accepts it.
            ("WORDWRAP", (0, 0), (-1, -1)),
        ]))
        story.append(table)
        doc.build(story)

    @staticmethod
    def generate_text(issues: List[Dict], output_file: Optional[str] = None, **kwargs) -> str:
        """Generate a colorized plain-text report.

        When ``output_file`` is given, the report is written there and a
        confirmation string is returned; otherwise the sanitized report text
        itself is returned. Extra keyword arguments (e.g. ``context``) are
        accepted and ignored.
        """
        # Fixed: this method carried a duplicate @staticmethod decorator,
        # which breaks calling on older Python versions.
        report = [
            f"{Fore.CYAN}=== Compliance Report ==={Style.RESET_ALL}",
            f"{Fore.YELLOW}Total Issues: {len(issues)}{Style.RESET_ALL}\n"
        ]
        current_file = None
        for item in issues:
            if "file" in item:
                if item["file"] != current_file:
                    report.append(f"\n{Fore.GREEN}File: {item['file']}{Style.RESET_ALL}")
                    current_file = item["file"]
            report.extend([
                f"{Fore.RED}• Issue: {item.get('issue', 'Unknown')}",
                f" Severity: {item.get('severity', 'Low')}",
                f" Rule: {item.get('rule', 'Unknown')}",
                f" Recommendation: {item.get('recommendation', 'N/A')}",
                "-" * 50
            ])
        report_str = "\n".join(report)
        if output_file:
            with open(output_file, "w") as f:
                # NOTE(review): only Fore.CYAN and the reset code are stripped
                # for file output; other color codes reach the file — confirm
                # this is intended.
                f.write(report_str.replace(Fore.CYAN, "").replace(Style.RESET_ALL, ""))
            return f"Report saved to {output_file}"
        return sanitize_output(report_str)

    @staticmethod
    def generate_report(issues: List[Dict], output_file: str, report_format: str = "text", **kwargs):
        """Unified report generation interface.

        Accepts extra keyword arguments (main.py passes ``context=...``) and
        forwards them to the text generator; other formats ignore them.
        Previously the missing ``**kwargs`` made that call raise TypeError.
        """
        report_format = report_format.lower()
        if report_format == "csv":
            ReportGenerator.generate_csv(issues, output_file)
        elif report_format == "json":
            ReportGenerator.generate_json(issues, output_file)
        elif report_format == "pdf":
            ReportGenerator.generate_pdf(issues, output_file)
        elif report_format == "text":
            ReportGenerator.generate_text(issues, output_file, **kwargs)
        else:
            raise ValueError(f"Unsupported format: {report_format}")
#scanner.py
import os
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
from typing import Dict, List, Any
from compliance_rules import ALL_RULES
logger = logging.getLogger(__name__)
class FileScanner:
    """Scans a directory's files against one compliance framework's rules."""

    def __init__(self, directory: str, framework: str, max_workers: int = 4,
                 max_file_size: int = 10 * 1024 * 1024):
        self.directory = directory
        self.framework = framework.upper()
        # Clamp the worker count into a sane 1..16 range.
        self.max_workers = max(1, min(max_workers, 16))
        self.max_file_size = max_file_size  # default: 10 MB
        self.file_queue = []
        # Reject frameworks with no rule set before doing any work.
        if self.framework not in ALL_RULES:
            supported = ', '.join(ALL_RULES.keys())
            raise ValueError(
                f"Unsupported framework: {self.framework}. Available frameworks: {supported}")
        self.rules = ALL_RULES[self.framework]
        logger.info(f"Initialized scanner for {self.framework} framework with {self.max_workers} workers")
# Validate directory
if not os.path.exists(self.directory):
logger.error(f"Directory not found: {self.directory}")
return [{"error": f"Directory '{self.directory}' not found"}]
if not os.path.isdir(self.directory):
logger.error(f"Not a directory: {self.directory}")
return [{"error": f"'{self.directory}' is not a directory"}]
if not self.file_queue:
logger.warning(f"No files found in {self.directory}")
return [{"warning": "No files found to scan"}]
return file_issues
#software_checker.py
import subprocess
import platform
import json
import logging
import re
from typing import Dict, List, Any, Optional, Tuple
from packaging.version import parse as parse_version, Version, InvalidVersion
from tqdm import tqdm
logger = logging.getLogger(__name__)
class SoftwareChecker:
    """Checks installed software packages for available updates."""

    def __init__(self, verbose: bool = False):
        self.verbose = verbose
        self.cache = {}  # cache for API responses
        # Verbose mode lowers the module logger's threshold to DEBUG;
        # otherwise it stays at INFO.
        logger.setLevel(logging.DEBUG if verbose else logging.INFO)
try:
# Get installed packages
packages = self.get_installed_software()
total_packages = len(packages)
if not packages:
logger.warning("No software packages found to check")
return [{
"warning": "No software packages found to check",
"rule": "Software Updates",
"recommendation": "Ensure package managers are properly
installed"
}]
except Exception as e:
logger.debug(f"Error checking {name}: {str(e)}")
finally:
pbar.update(1)
except Exception as e:
logger.error(f"Error during software check: {str(e)}")
return [{
"error": f"Error during software check: {str(e)}",
"rule": "Software Updates",
"recommendation": "Check logs for more details"
}]
return outdated_packages
if os_type == "Linux":
return self._get_linux_packages()
elif os_type == "Windows":
return self._get_windows_packages()
elif os_type == "Darwin": # macOS
return self._get_macos_packages()
else:
logger.warning(f"Unsupported OS: {os_type}")
return {}
if packages:
return packages
except Exception as e:
logger.debug(f"Error getting apt packages: {str(e)}")
if packages:
return packages
except Exception as e:
logger.debug(f"Error getting dpkg packages: {str(e)}")
if packages:
return packages
except Exception as e:
logger.debug(f"Error getting rpm packages: {str(e)}")
if packages:
return packages
except Exception as e:
logger.debug(f"Error getting pacman packages: {str(e)}")
pip_packages = json.loads(output)
for pkg in pip_packages:
packages[pkg["name"]] = pkg["version"]
if packages:
return packages
except Exception as e:
logger.debug(f"Error getting pip packages: {str(e)}")
return packages
try:
# Use PowerShell to get installed applications
output = subprocess.check_output(
["powershell", "-Command",
"Get-WmiObject -Class Win32_Product | Select-Object Name, Version
| ConvertTo-Json"],
text=True,
encoding="utf-8",
errors="replace"
)
if output.strip():
try:
apps = json.loads(output)
if isinstance(apps, dict): # Single app
packages[apps["Name"]] = apps["Version"]
else: # Multiple apps
for app in apps:
if app["Name"] and app["Version"]:
packages[app["Name"]] = app["Version"]
except Exception as e:
logger.debug(f"Error parsing Windows packages: {str(e)}")
except Exception as e:
logger.debug(f"Error getting Windows packages: {str(e)}")
return packages
if packages:
return packages
except Exception as e:
logger.debug(f"Error getting brew packages: {str(e)}")
apps_data = json.loads(output)
if "SPApplicationsDataType" in apps_data:
for app in apps_data["SPApplicationsDataType"]:
if "_name" in app and "version" in app:
packages[app["_name"]] = app["version"]
except Exception as e:
logger.debug(f"Error getting macOS apps: {str(e)}")
return packages
except Exception as e:
logger.debug(f"PyPI check failed for {package_name}: {str(e)}")
data = json.loads(output)
if "tag_name" in data:
version = data["tag_name"].lstrip("v")
return version, "GitHub"
except Exception as e:
logger.debug(f"GitHub check failed for {package_name}: {str(e)}")
if output.strip():
return output.strip(), "NPM"
except Exception as e:
logger.debug(f"NPM check failed for {package_name}: {str(e)}")
return "Low"
except (InvalidVersion, AttributeError):
# If version parsing fails, assume medium severity
return "Medium"
#main.py
import argparse
import os
import time
import sys
import logging
from typing import List, Dict, Any
from concurrent.futures import ThreadPoolExecutor
from abc import ABC, abstractmethod
from colorama import Fore, Style, init
VERSION = "2.2.0"
CONFIG_FILE = "config/compliance_settings.json"
@abstractmethod
def check(self) -> List[Dict[str, Any]]:
"""Run the compliance check and return list of issues."""
pass
# Configure logging
def setup_logging(log_file: str = 'compliance.log', verbose: bool = False) -> None:
    """Set up logging configuration.

    Creates the log file's parent directory if needed, configures the root
    logger to write to ``log_file``, and attaches the sanitization filter.

    Args:
        log_file: Destination log file path.
        verbose: When True, log at DEBUG level instead of INFO.
    """
    # The CLI default is logs/compliance.log; logging.basicConfig raises if
    # the parent directory does not exist, so create it first.
    log_dir = os.path.dirname(log_file)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    log_level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(
        filename=log_file,
        level=log_level,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    # NOTE(review): SanitizationFilter is not defined in this view —
    # presumably imported from helper_utils; confirm it is in scope.
    logging.getLogger().addFilter(SanitizationFilter())
def print_status(message: str, color: str = Fore.WHITE) -> None:
    """Print a status message with a timestamp, in the given color.

    The color code is now reset at the end of the line so it does not bleed
    into subsequent terminal output (other prints in this file reset
    explicitly with Style.RESET_ALL).
    """
    safe_message = sanitize_output(message)
    print(f"{color}[{time.strftime('%H:%M:%S')}] {safe_message}{Style.RESET_ALL}")
# Scan options
scan_group = parser.add_argument_group("Scan options")
scan_group.add_argument("-s", "--scan", type=str, metavar="PATH",
help="Directory to scan")
scan_group.add_argument("-f", "--framework", type=str,
choices=["GDPR", "HIPAA", "PCI-DSS", "NIST", "SOC2",
"ISO27001"],
default="GDPR",
help="Compliance framework (default: GDPR)")
scan_group.add_argument("-t", "--threads", type=int, default=4,
help="Number of threads for scanning (default: 4)")
scan_group.add_argument("--exclude", type=str, metavar="PATTERNS",
help="Comma-separated patterns to exclude from
scanning")
scan_group.add_argument("--max-file-size", type=int, default=10,
help="Maximum file size to scan in MB (default: 10)")
scan_group.add_argument("--incremental", action="store_true",
help="Only scan files modified since last scan")
# Check options
check_group = parser.add_argument_group("Check options")
check_group.add_argument("-p", "--check-passwords", action="store_true",
help="Enable password checks")
check_group.add_argument("-u", "--check-outdated", action="store_true",
help="Check for outdated software packages")
check_group.add_argument("--check-permissions", action="store_true",
help="Check file and directory permissions")
check_group.add_argument("--check-all", action="store_true",
help="Enable all checks")
check_group.add_argument("--check-dependencies", action="store_true",
help="Check project dependencies against requirements
file")
check_group.add_argument("--min-severity", type=str,
choices=["info", "low", "medium", "high", "critical"],
default="low",
help="Minimum severity level to report (default:
low)")
# Output options
output_group = parser.add_argument_group("Output options")
output_group.add_argument("-o", "--output", type=str, metavar="FILE",
help="Output report file")
output_group.add_argument("-F", "--output-format", type=str,
choices=["csv", "json", "pdf", "text", "html",
"xml"],
default="text",
help="Report format (default: text)")
output_group.add_argument("--output-all", action="store_true",
help="Generate reports in all available formats")
output_group.add_argument("-v", "--verbose", action="store_true",
help="Show detailed processing information")
output_group.add_argument("--log-file", type=str,
default="logs/compliance.log",
help="Log file location (default:
logs/compliance.log)")
output_group.add_argument("--version", action="version",
version=f"%(prog)s {VERSION}",
help="Show version information")
output_group.add_argument("--save-config", action="store_true",
help="Save current settings as default
configuration")
output_group.add_argument("--profile", type=str, default="default",
help="Configuration profile to use or save (default:
default)")
output_group.add_argument("--progress", action="store_true",
help="Show progress bar for long operations")
# Advanced options
advanced_group = parser.add_argument_group("Advanced options")
advanced_group.add_argument("--interactive", action="store_true",
help="Run in interactive mode")
advanced_group.add_argument("--debug", action="store_true",
help="Enable debug mode")
advanced_group.add_argument("--cache-dir", type=str, default=".cache",
help="Cache directory for incremental scans")
return parser.parse_args()
try:
if not os.path.exists(requirements_file):
print_status(f"Requirements file not found: {requirements_file}",
Fore.YELLOW)
return issues
if verbose:
print_status(f"Found {len(packages)} required packages", Fore.BLUE)
except Exception as e:
logging.error(f"Error checking dependencies: {str(e)}")
print_status(f"Error checking dependencies: {str(e)}", Fore.RED)
return issues
return file_types
if not scan_path:
print_status("No scan path specified. Use --scan to specify a directory.",
Fore.RED)
sys.exit(1)
if not os.path.exists(scan_path):
print_status(f"Path not found: {scan_path}", Fore.RED)
sys.exit(1)
# Normalize path
scan_path = os.path.abspath(scan_path)
try:
# Get OS info for context
os_info = get_os_info()
logging.info(f"Running on {os_info['system']} {os_info['release']}
{os_info['machine']}")
if args.check_passwords or args.check_all:
if args.verbose:
print_status("Checking for password issues...", Fore.BLUE)
from password_checker import PasswordChecker
checker = PasswordChecker(scan_path)
futures.append(executor.submit(checker.check_passwords))
if args.check_permissions or args.check_all:
if args.verbose:
print_status("Checking file and directory permissions...",
Fore.BLUE)
from permission_checker import PermissionChecker
perm_checker = PermissionChecker(scan_path, args.framework)
futures.append(executor.submit(perm_checker.check_permissions))
if args.check_outdated or args.check_all:
if args.verbose:
print_status("Checking for outdated software...", Fore.BLUE)
from software_checker import SoftwareChecker
sw_checker = SoftwareChecker(args.verbose)
futures.append(executor.submit(sw_checker.check_outdated_software))
if args.check_dependencies or args.check_all:
futures.append(executor.submit(check_project_dependencies,
"requirements.txt", args.verbose))
# Collect results
for future in futures:
try:
result = future.result()
if result:
all_issues.extend(result)
except Exception as e:
logging.error(f"Error in compliance check: {str(e)}")
print_status(f"Error in compliance check: {str(e)}", Fore.RED)
if args.min_severity:
severity_levels = {"info": 0, "low": 1, "medium": 2, "high": 3,
"critical": 4}
min_level = severity_levels.get(args.min_severity, 1)
all_issues = [issue for issue in all_issues if
severity_levels.get(issue.get("severity", "low"), 1) >= min_level]
if args.incremental:
tracker.update_file_cache(scan_path)
except KeyboardInterrupt:
print_status("\nOperation cancelled by user", Fore.YELLOW)
sys.exit(1)
except Exception as e:
logging.exception("Critical error during checks")
print_status(f"Critical error: {str(e)}", Fore.RED)
sys.exit(1)
return all_issues
def generate_all_reports(issues: List[Dict[str, Any]], base_filename: str, context:
Dict[str, Any]) -> None:
"""Generate reports in all available formats."""
from report_generator import ReportGenerator
check_all = False
if "6" in check_choice or not check_choice.strip():
check_all = True
else:
if "2" in check_choice:
checks.append("password")
if "3" in check_choice:
checks.append("permission")
if "4" in check_choice:
checks.append("software")
if "5" in check_choice:
checks.append("dependency")
formats = ["text", "csv", "json", "pdf", "html", "xml"]
print(f"{Fore.YELLOW}Available output formats:{Style.RESET_ALL}")
for i, fmt in enumerate(formats, 1):
print(f" {i}. {fmt}")
format_choice = input(f"{Fore.YELLOW}Select output format [1-{len(formats)}]
(default: 1): {Style.RESET_ALL}")
try:
format_idx = int(format_choice) - 1 if format_choice.strip() else 0
output_format = formats[format_idx]
except (ValueError, IndexError):
output_format = "text"
print(f"{Fore.YELLOW}Invalid choice. Using default: {output_format}
{Style.RESET_ALL}")
output_file = input(f"{Fore.YELLOW}Enter output file (leave empty for console
output): {Style.RESET_ALL}")
args.scan = scan_path
args.framework = framework
args.check_all = check_all
args.check_passwords = check_all or "password" in checks
args.check_permissions = check_all or "permission" in checks
args.check_outdated = check_all or "software" in checks
args.check_dependencies = check_all or "dependency" in checks
args.output_format = output_format
args.output = output_file if output_file.strip() else None
start_time = time.time()
try:
if not (args.scan or args.check_outdated):
print_status(
"No actions specified. Use --scan, --check-passwords, --check-
outdated, or --check-permissions",
Fore.RED
)
sys.exit(1)
os_info = get_os_info()
report_context = {
"os_info": os_info,
"tool_version": VERSION,
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
"scan_duration": time.time() - start_time,
"framework": args.framework,
"scan_path": args.scan,
"checks_performed": {
"file_scan": True if args.scan else False,
"password_check": args.check_passwords or args.check_all,
"permission_check": args.check_permissions or args.check_all,
"software_check": args.check_outdated or args.check_all,
"dependency_check": args.check_dependencies or args.check_all
}
}
if all_issues:
if args.output:
if args.output_all:
base_filename = os.path.splitext(args.output)[0]
generate_all_reports(all_issues, base_filename, report_context)
else:
from report_generator import ReportGenerator
ReportGenerator.generate_report(all_issues, args.output,
args.output_format, context=report_context)
print_status(f"Report saved to {args.output}", Fore.GREEN)
else:
from report_generator import ReportGenerator
print(ReportGenerator.generate_text(all_issues,
context=report_context))
else:
print_status("No issues found. Compliance check passed!", Fore.GREEN)
except KeyboardInterrupt:
print_status("\nOperation cancelled by user", Fore.YELLOW)
logging.warning("Operation cancelled by user")
sys.exit(1)
except Exception as e:
logging.exception("Critical error")
print_status(f"Critical error: {str(e)}", Fore.RED)
sys.exit(1)
if __name__ == "__main__":
main()