准备写论文,论文大纲和数据搞定
This commit is contained in:
207
run_statistical_experiments.py
Normal file
207
run_statistical_experiments.py
Normal file
@@ -0,0 +1,207 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
Statistical Experiment Runner for Paper Results.
|
||||
|
||||
Runs N seeds per algorithm and saves results for statistical analysis.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
# Add sim to path
|
||||
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
from sim.main import run_simulation
|
||||
|
||||
|
||||
def run_statistical_experiments(
    num_seeds=50,
    num_nodes=12,
    area_size=800,
    sim_time=100,
    algorithms=None,
    results_dir="results",
):
    """
    Run experiments with multiple seeds for statistical significance.

    Args:
        num_seeds: Number of random seeds to run
        num_nodes: Number of nodes
        area_size: Area size in meters
        sim_time: Simulation time
        algorithms: List of algorithms to test (defaults to
            ["gradient", "flooding", "random"])
        results_dir: Base directory for per-algorithm result files
            (defaults to "results", the previously hard-coded path)

    Returns:
        Dictionary mapping algorithm name -> list of per-seed result dicts.
        A failed run contributes {"seed": ..., "error": ...} instead of metrics.
    """
    if algorithms is None:
        algorithms = ["gradient", "flooding", "random"]

    all_results = {}

    total_runs = num_seeds * len(algorithms)
    current = 0

    print(
        f"Running {num_seeds} seeds × {len(algorithms)} algorithms = {total_runs} experiments"
    )
    print("=" * 60)

    for algo in algorithms:
        print(f"\n>>> Algorithm: {algo}")
        algo_results = []

        for seed in range(num_seeds):
            current += 1
            print(f" [{current}/{total_runs}] Seed {seed}...", end=" ")

            try:
                results = run_simulation(
                    num_nodes=num_nodes,
                    area_size=area_size,
                    sim_time=sim_time,
                    seed=seed,
                    routing_type=algo,
                )

                m = results["metrics"]
                e = results["efficiency"]

                # Extract key metrics
                result = {
                    "seed": seed,
                    "pdr": m["pdr"],
                    "total_sent": m["total_sent"],
                    "total_received": m["total_received"],
                    "max_hop": m["max_hop"],
                    "avg_hop": m["avg_hop"],
                    "total_transmissions": e["total_transmissions"],
                    "airtime_usage_percent": e["airtime_usage_percent"],
                    "tx_per_success": e["tx_per_success"],
                    "collisions": m["collisions"],
                }

                algo_results.append(result)
                print(f"PDR={result['pdr']:.2f}%, TX={result['total_transmissions']}")

            except Exception as ex:
                # Best-effort: record the failure and keep the batch running,
                # so one bad seed does not discard the rest of the sweep.
                print(f"ERROR: {ex}")
                algo_results.append({"seed": seed, "error": str(ex)})

        all_results[algo] = algo_results

        # Save per-algorithm results
        algo_dir = os.path.join(results_dir, algo)
        os.makedirs(algo_dir, exist_ok=True)

        with open(os.path.join(algo_dir, "all_seeds.json"), "w") as f:
            json.dump(algo_results, f, indent=2)

        print(f" Saved {len(algo_results)} results to {algo_dir}/")

    return all_results
|
||||
|
||||
|
||||
def compute_statistics(results):
    """Compute mean, std, and 95% CI for each metric."""
    import math

    # Metrics aggregated across seeds; entries missing from a run are skipped.
    tracked_metrics = (
        "pdr",
        "airtime_usage_percent",
        "tx_per_success",
        "max_hop",
        "collisions",
    )

    def summarize(samples):
        # Sample statistics with a normal-approximation 95% CI (z = 1.96).
        count = len(samples)
        avg = sum(samples) / count
        if count > 1:
            spread = math.sqrt(
                sum((v - avg) ** 2 for v in samples) / (count - 1)
            )
        else:
            spread = 0.0
        half_width = 1.96 * spread / math.sqrt(count) if count > 0 else 0
        return {
            "mean": round(avg, 4),
            "std": round(spread, 4),
            "ci_95": round(half_width, 4),
            "min": round(min(samples), 4),
            "max": round(max(samples), 4),
            "n": count,
        }

    summary = {}
    for algo, runs in results.items():
        # Drop runs that recorded an error instead of metrics.
        ok_runs = [run for run in runs if "error" not in run]
        summary[algo] = {}
        for name in tracked_metrics:
            samples = [run[name] for run in ok_runs if name in run]
            if samples:
                summary[algo][name] = summarize(samples)

    return summary
|
||||
|
||||
|
||||
def save_statistics(all_results, stats):
    """Save raw results and aggregate statistics to JSON, then print a summary.

    Args:
        all_results: Mapping of algorithm name -> list of per-seed result dicts.
        stats: Mapping of algorithm name -> {metric: summary dict}, as produced
            by compute_statistics().

    Side effects:
        Writes results/all_raw_results.json and results/statistics.json
        (creating results/ if needed) and prints a per-algorithm summary.
    """
    # Ensure the output directory exists so this function also works when
    # called on its own; previously it relied on run_statistical_experiments
    # having created results/ first and raised FileNotFoundError otherwise.
    os.makedirs("results", exist_ok=True)

    # Save combined raw results
    with open("results/all_raw_results.json", "w") as f:
        json.dump(all_results, f, indent=2)

    # Save statistics
    with open("results/statistics.json", "w") as f:
        json.dump(stats, f, indent=2)

    print("\n" + "=" * 60)
    print("STATISTICS SUMMARY")
    print("=" * 60)

    for algo, algo_stats in stats.items():
        print(f"\n{algo.upper()}:")
        for metric, s in algo_stats.items():
            print(f" {metric:30s}: {s['mean']:8.4f} ± {s['ci_95']:.4f} (95% CI)")
|
||||
|
||||
|
||||
def main():
    """Drive the full pipeline: run all seeds, aggregate, and save results."""
    banner = "=" * 60
    print(banner)
    print("Statistical Experiment Runner for Paper Results")
    print(banner)
    print(f"Start time: {datetime.now()}")

    # Experiment configuration (seeds per algorithm, topology, duration).
    config = {
        "num_seeds": 50,
        "num_nodes": 12,
        "area_size": 800,
        "sim_time": 100,
    }

    # Run experiments
    all_results = run_statistical_experiments(**config)

    # Compute statistics
    stats = compute_statistics(all_results)

    # Save results
    save_statistics(all_results, stats)

    print(f"\nEnd time: {datetime.now()}")
    print("\nResults saved to:")
    output_files = (
        "results/gradient/all_seeds.json",
        "results/flooding/all_seeds.json",
        "results/random/all_seeds.json",
        "results/statistics.json",
    )
    for path in output_files:
        print(f" {path}")
|
||||
|
||||
|
||||
# Run the full experiment pipeline only when executed as a script,
# not when this module is imported for its helper functions.
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user