144 lines
3.8 KiB
Python
144 lines
3.8 KiB
Python
#!/usr/bin/env python
|
|
"""
|
|
LoRa Mesh Experiment Runner
|
|
|
|
Usage:
|
|
python run_experiments.py # Quick comparison
|
|
python run_experiments.py --full # Full parameter sweep
|
|
python run_experiments.py --routing gradient # Single algorithm
|
|
"""
|
|
|
|
import argparse
|
|
import os
|
|
import sys
|
|
|
|
# Add sim to path: prepend this script's directory so the local ``sim``
# package resolves regardless of the current working directory.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
|
|
|
from sim.experiments.runner import (
|
|
run_parameter_sweep,
|
|
run_quick_comparison,
|
|
save_results_csv,
|
|
compute_averages,
|
|
)
|
|
|
|
|
|
def main():
    """Parse CLI arguments and run the selected experiment mode.

    Modes (mutually exclusive, checked in this order):
      --full            full parameter sweep: 3 routing algorithms x 3 seeds
      --routing NAME    sweep a single routing algorithm with one seed
      (default)         quick comparison of all 3 algorithms

    Results are written as CSV to ``--output``; the full sweep also writes
    a ``*_averaged.csv`` companion file with per-configuration averages.
    """
    parser = argparse.ArgumentParser(description="LoRa Mesh Experiment Runner")

    parser.add_argument(
        "--full",
        action="store_true",
        help="Run full parameter sweep (slower)",
    )
    parser.add_argument(
        "--routing",
        choices=["gradient", "flooding", "random"],
        help="Run only specific routing algorithm",
    )
    parser.add_argument(
        "--nodes",
        type=int,
        nargs="+",
        default=[6, 9, 12, 15],
        help="Node counts to test",
    )
    parser.add_argument(
        "--area",
        type=float,
        nargs="+",
        # Float defaults: ``type=float`` only converts CLI-supplied values,
        # so defaults must already be floats to keep the types consistent.
        default=[500.0, 800.0, 1000.0],
        help="Area sizes to test",
    )
    parser.add_argument(
        "--time",
        type=float,
        default=200.0,
        help="Simulation time per experiment",
    )
    parser.add_argument(
        "--output",
        default="results.csv",
        help="Output CSV file",
    )

    args = parser.parse_args()

    print("=" * 60)
    print("LoRa Mesh Experiment Runner")
    print("=" * 60)

    if args.full:
        # Full parameter sweep across every routing algorithm and seed.
        print("\nRunning full parameter sweep...")

        routings = ["gradient", "flooding", "random"]
        seeds = [42, 123, 456]

        results = run_parameter_sweep(
            routings=routings,
            node_counts=args.nodes,
            area_sizes=args.area,
            sim_time=args.time,
            seeds=seeds,
        )

        # Average across seeds so each (routing, nodes, area) is one row.
        averaged = compute_averages(results)

        # Save both raw and averaged results.
        save_results_csv(results, args.output)
        save_results_csv(averaged, args.output.replace(".csv", "_averaged.csv"))

        print("\n=== Averaged Results ===")
        for r in averaged:
            print(
                f"{r['routing']:10s} nodes={r['nodes']:2d} area={r['area']:4.0f}m "
                f"PDR={r['avg_pdr']:5.2f}% max_hop={r['avg_max_hop']:.1f}"
            )

    elif args.routing:
        # Single routing algorithm, single fixed seed.
        print(f"\nRunning {args.routing} algorithm...")

        results = run_parameter_sweep(
            routings=[args.routing],
            node_counts=args.nodes,
            area_sizes=args.area,
            sim_time=args.time,
            seeds=[42],
        )

        save_results_csv(results, args.output)

        print("\n=== Results ===")
        for r in results:
            print(
                f"{r['routing']:10s} nodes={r['nodes']:2d} area={r['area']:4.0f}m "
                f"PDR={r['pdr']:5.2f}% max_hop={r['max_hop']}"
            )

    else:
        # Quick comparison (default): one run per algorithm.
        print("\nRunning quick comparison (3 algorithms)...")

        results = run_quick_comparison()

        print("\n=== Results ===")
        for routing, data in results.items():
            print(f"\n{routing.upper()}:")
            print(f" PDR: {data['pdr']:.2f}%")
            print(f" Max Hop: {data['max_hop']}")
            print(f" Avg Hop: {data['avg_hop']:.2f}")
            print(f" Sent: {data['total_sent']}, Received: {data['total_received']}")
            print(f" Collisions: {data['collisions']}")

        # Save quick results. list(...) instead of a pass-through
        # comprehension (ruff C416).
        save_results_csv(list(results.values()), args.output)

    print(f"\n✓ Results saved to {args.output}")
|
|
|
|
|
|
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|