一个具备科研可验证性的 LoRa 多跳算法评估基线。
This commit is contained in:
231
sim/experiments/runner.py
Normal file
231
sim/experiments/runner.py
Normal file
@@ -0,0 +1,231 @@
|
||||
"""
|
||||
Experiment Runner for LoRa Mesh Simulation.
|
||||
|
||||
Provides automated experiment execution with parameter sweeps.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from typing import List, Dict, Any
|
||||
from itertools import product
|
||||
|
||||
from sim.main import run_simulation
|
||||
|
||||
|
||||
def run_single_experiment(
    routing: str,
    node_count: int,
    area_size: float,
    sim_time: float,
    seed: int = 42,
) -> Dict[str, Any]:
    """
    Run one simulation and flatten its metrics into a flat result record.

    Args:
        routing: Routing algorithm ("gradient", "flooding", "random")
        node_count: Number of nodes
        area_size: Area size in meters
        sim_time: Simulation time in seconds
        seed: Random seed

    Returns:
        Dictionary combining the input parameters with the simulation metrics
    """
    sim_output = run_simulation(
        num_nodes=node_count,
        area_size=area_size,
        sim_time=sim_time,
        seed=seed,
        routing_type=routing,
    )
    metrics = sim_output["metrics"]

    # Start the record with the experiment parameters ...
    record: Dict[str, Any] = {
        "routing": routing,
        "nodes": node_count,
        "area": area_size,
        "sim_time": sim_time,
        "seed": seed,
    }
    # ... then append the reported metric fields in a fixed order.
    for field in (
        "pdr",
        "max_hop",
        "avg_hop",
        "total_sent",
        "total_received",
        "total_forwarded",
        "collisions",
        "convergence_time",
        "route_changes",
    ):
        record[field] = metrics[field]
    return record
|
||||
|
||||
|
||||
def run_parameter_sweep(
    routings: Optional[List[str]] = None,
    node_counts: Optional[List[int]] = None,
    area_sizes: Optional[List[float]] = None,
    sim_time: float = 200,
    seeds: Optional[List[int]] = None,
    output_file: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """
    Run experiments over the cross product of all parameter settings.

    Args:
        routings: List of routing algorithms (default: gradient/flooding/random)
        node_counts: List of node counts (default: [6, 9, 12, 15])
        area_sizes: List of area sizes in meters (default: [500, 800, 1000])
        sim_time: Simulation time in seconds (same for all runs)
        seeds: List of random seeds (for averaging across runs)
        output_file: Optional CSV path; results are saved there when given

    Returns:
        List of experiment results, one per parameter combination
    """
    # Defaults are filled in here (not in the signature) to avoid
    # mutable default arguments; annotations fixed to Optional[...] to
    # match the None defaults.
    if routings is None:
        routings = ["gradient", "flooding", "random"]
    if node_counts is None:
        node_counts = [6, 9, 12, 15]
    if area_sizes is None:
        area_sizes = [500, 800, 1000]
    if seeds is None:
        seeds = [42, 123, 456]  # Multiple seeds for averaging

    results: List[Dict[str, Any]] = []

    total_experiments = len(routings) * len(node_counts) * len(area_sizes) * len(seeds)
    print(f"Running {total_experiments} experiments...")

    # enumerate replaces the hand-maintained `current` counter.
    for current, (routing, nodes, area, seed) in enumerate(
        product(routings, node_counts, area_sizes, seeds), start=1
    ):
        print(
            f" [{current}/{total_experiments}] {routing}, nodes={nodes}, area={area}, seed={seed}"
        )
        results.append(
            run_single_experiment(
                routing=routing,
                node_count=nodes,
                area_size=area,
                sim_time=sim_time,
                seed=seed,
            )
        )

    # Save to CSV if requested
    if output_file:
        save_results_csv(results, output_file)

    return results
|
||||
|
||||
|
||||
def save_results_csv(results: List[Dict[str, Any]], filename: str):
    """
    Save experiment results to a CSV file.

    Args:
        results: List of result dicts; all entries are assumed to share the
            keys of the first one (true for run_parameter_sweep output).
        filename: Destination CSV path.
    """
    import csv

    if not results:
        # Nothing to write; avoid creating an empty file with no header.
        return

    # Column order follows the key order of the first result.
    keys = list(results[0].keys())

    with open(filename, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=keys)
        writer.writeheader()
        writer.writerows(results)

    # Bug fix: the f-string had no placeholder and printed a literal
    # "(unknown)" instead of the actual output path.
    print(f"Results saved to {filename}")
|
||||
|
||||
|
||||
def compute_averages(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Aggregate per-seed results into per-configuration summary statistics.

    Results are grouped by (routing, nodes, area); each numeric metric is
    reported as its avg_/min_/max_ over the seeds within a group.

    Args:
        results: List of experiment results with varying seeds

    Returns:
        List of averaged results, one entry per configuration
    """
    from collections import defaultdict

    # Metric fields to summarize; everything else identifies the config.
    metric_names = (
        "pdr",
        "max_hop",
        "avg_hop",
        "total_sent",
        "total_received",
        "total_forwarded",
        "collisions",
        "convergence_time",
        "route_changes",
    )

    # Bucket the raw results by configuration.
    buckets = defaultdict(list)
    for record in results:
        buckets[(record["routing"], record["nodes"], record["area"])].append(record)

    summaries = []
    for (routing, nodes, area), bucket in buckets.items():
        summary: Dict[str, Any] = {
            "routing": routing,
            "nodes": nodes,
            "area": area,
            "num_seeds": len(bucket),
        }
        for metric in metric_names:
            values = [entry[metric] for entry in bucket]
            summary[f"avg_{metric}"] = sum(values) / len(values)
            summary[f"min_{metric}"] = min(values)
            summary[f"max_{metric}"] = max(values)
        summaries.append(summary)

    return summaries
|
||||
|
||||
|
||||
def run_quick_comparison(
    routing: str = "gradient",
    node_count: int = 12,
    area_size: float = 800,
    sim_time: float = 200,
) -> Dict[str, Any]:
    """
    Run a quick comparison of all routing algorithms.

    Returns results for gradient, flooding, and random, keyed by algorithm.

    NOTE(review): the `routing` parameter is unused (all three algorithms
    are always run) — kept for interface compatibility; confirm with callers.
    """
    comparison: Dict[str, Any] = {}
    for algorithm in ("gradient", "flooding", "random"):
        print(f"Running {algorithm}...")
        comparison[algorithm] = run_single_experiment(
            routing=algorithm,
            node_count=node_count,
            area_size=area_size,
            sim_time=sim_time,
        )
    return comparison
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Smoke test: run each routing algorithm once and print a summary.
    print("Running quick comparison...")
    comparison = run_quick_comparison()

    print("\n=== Results ===")
    for algorithm, stats in comparison.items():
        print(f"\n{algorithm.upper()}:")
        print(f" PDR: {stats['pdr']:.2f}%")
        print(f" Max Hop: {stats['max_hop']}")
        print(f" Avg Hop: {stats['avg_hop']:.2f}")
        print(f" Sent: {stats['total_sent']}, Received: {stats['total_received']}")
|
||||
Reference in New Issue
Block a user