一个具备科研可验证性的 LoRa 多跳算法评估基线。

This commit is contained in:
sinlatansen
2026-02-25 20:14:45 +08:00
parent 8537331c6f
commit 5ee1a16574
18 changed files with 1704 additions and 47 deletions

View File

@@ -53,3 +53,8 @@ ROUTE_UPDATE_THRESHOLD = 1.0 # Cost threshold for route update
# =============================================================================
LOG_LEVEL = "INFO" # DEBUG, INFO, WARNING, ERROR
LOG_FORMAT = "[{time:.1f}][NODE{nid:>3}][{event}] {message}"
# =============================================================================
# Experiment Settings
# =============================================================================
RANDOM_SEED = 42 # Default random seed for reproducibility

231
sim/experiments/runner.py Normal file
View File

@@ -0,0 +1,231 @@
"""
Experiment Runner for LoRa Mesh Simulation.
Provides automated experiment execution with parameter sweeps.
"""
import json
import os
from typing import List, Dict, Any
from itertools import product
from sim.main import run_simulation
def run_single_experiment(
    routing: str,
    node_count: int,
    area_size: float,
    sim_time: float,
    seed: int = 42,
) -> Dict[str, Any]:
    """
    Execute one simulation run and flatten its metrics into a single record.

    Args:
        routing: Routing algorithm ("gradient", "flooding", "random")
        node_count: Number of nodes to deploy
        area_size: Side length of the square deployment area in meters
        sim_time: Simulated duration in seconds
        seed: Random seed for reproducibility

    Returns:
        Flat dictionary combining the input parameters with the key metrics.
    """
    outcome = run_simulation(
        num_nodes=node_count,
        area_size=area_size,
        sim_time=sim_time,
        seed=seed,
        routing_type=routing,
    )
    metrics = outcome["metrics"]

    # Start with the experiment parameters, then copy the metric fields of
    # interest verbatim from the simulation summary.
    record: Dict[str, Any] = {
        "routing": routing,
        "nodes": node_count,
        "area": area_size,
        "sim_time": sim_time,
        "seed": seed,
    }
    for key in (
        "pdr",
        "max_hop",
        "avg_hop",
        "total_sent",
        "total_received",
        "total_forwarded",
        "collisions",
        "convergence_time",
        "route_changes",
    ):
        record[key] = metrics[key]
    return record
def run_parameter_sweep(
    routings: List[str] = None,
    node_counts: List[int] = None,
    area_sizes: List[float] = None,
    sim_time: float = 200,
    seeds: List[int] = None,
    output_file: str = None,
) -> List[Dict[str, Any]]:
    """
    Run the full cartesian product of parameter combinations.

    Args:
        routings: Routing algorithms to evaluate (default: all three)
        node_counts: Node counts to evaluate
        area_sizes: Area sizes to evaluate
        sim_time: Simulation time, shared by every run
        seeds: Random seeds (multiple seeds enable averaging)
        output_file: Optional CSV path for the raw results

    Returns:
        List of per-run result dictionaries.
    """
    # Fill in defaults for any sweep dimension the caller left unspecified.
    if routings is None:
        routings = ["gradient", "flooding", "random"]
    if node_counts is None:
        node_counts = [6, 9, 12, 15]
    if area_sizes is None:
        area_sizes = [500, 800, 1000]
    if seeds is None:
        seeds = [42, 123, 456]  # Multiple seeds for averaging

    combos = list(product(routings, node_counts, area_sizes, seeds))
    total_experiments = len(combos)
    print(f"Running {total_experiments} experiments...")

    results: List[Dict[str, Any]] = []
    for current, (routing, nodes, area, seed) in enumerate(combos, start=1):
        print(
            f" [{current}/{total_experiments}] {routing}, nodes={nodes}, area={area}, seed={seed}"
        )
        results.append(
            run_single_experiment(
                routing=routing,
                node_count=nodes,
                area_size=area,
                sim_time=sim_time,
                seed=seed,
            )
        )

    # Persist the raw rows if the caller asked for a CSV.
    if output_file:
        save_results_csv(results, output_file)
    return results
def save_results_csv(results: List[Dict[str, Any]], filename: str):
    """Save experiment results to a CSV file.

    Args:
        results: List of flat result dictionaries. The column order is taken
            from the keys of the first result; all rows are assumed to share
            the same keys.
        filename: Destination CSV path.
    """
    import csv

    if not results:
        # Nothing to write — avoid creating an empty file with no header.
        return

    keys = list(results[0].keys())
    with open(filename, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=keys)
        writer.writeheader()
        writer.writerows(results)
    # BUG FIX: the message previously printed a literal placeholder instead
    # of interpolating the destination path.
    print(f"Results saved to {filename}")
def compute_averages(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Aggregate per-seed results into per-configuration statistics.

    Args:
        results: List of experiment results with varying seeds

    Returns:
        One record per (routing, nodes, area) configuration carrying the
        seed count plus avg/min/max of every numeric metric.
    """
    from collections import defaultdict

    # Bucket the raw rows by configuration, ignoring the seed.
    buckets = defaultdict(list)
    for record in results:
        buckets[(record["routing"], record["nodes"], record["area"])].append(record)

    stat_fields = (
        "pdr",
        "max_hop",
        "avg_hop",
        "total_sent",
        "total_received",
        "total_forwarded",
        "collisions",
        "convergence_time",
        "route_changes",
    )

    summary = []
    for (routing, nodes, area), records in buckets.items():
        entry: Dict[str, Any] = {
            "routing": routing,
            "nodes": nodes,
            "area": area,
            "num_seeds": len(records),
        }
        for field_name in stat_fields:
            samples = [rec[field_name] for rec in records]
            entry[f"avg_{field_name}"] = sum(samples) / len(samples)
            entry[f"min_{field_name}"] = min(samples)
            entry[f"max_{field_name}"] = max(samples)
        summary.append(entry)
    return summary
def run_quick_comparison(
    routing: str = "gradient",
    node_count: int = 12,
    area_size: float = 800,
    sim_time: float = 200,
) -> Dict[str, Any]:
    """
    Run all three routing algorithms with identical parameters.

    NOTE(review): the ``routing`` parameter is accepted but never used — all
    three algorithms are always run; it is kept for backward compatibility.

    Returns:
        Mapping of algorithm name -> experiment result, in the order
        gradient, flooding, random.
    """
    comparison: Dict[str, Any] = {}
    for algorithm in ("gradient", "flooding", "random"):
        print(f"Running {algorithm}...")
        comparison[algorithm] = run_single_experiment(
            routing=algorithm,
            node_count=node_count,
            area_size=area_size,
            sim_time=sim_time,
        )
    return comparison
if __name__ == "__main__":
    # Smoke test: compare all routing algorithms with default parameters
    # and print a compact per-algorithm summary.
    print("Running quick comparison...")
    comparison = run_quick_comparison()
    print("\n=== Results ===")
    for name, stats in comparison.items():
        print(f"\n{name.upper()}:")
        print(f" PDR: {stats['pdr']:.2f}%")
        print(f" Max Hop: {stats['max_hop']}")
        print(f" Avg Hop: {stats['avg_hop']:.2f}")
        print(f" Sent: {stats['total_sent']}, Received: {stats['total_received']}")

View File

@@ -27,6 +27,7 @@ def deploy_nodes(
num_nodes: int = None,
area_size: float = None,
metrics_collector: MetricsCollector = None,
routing_type: str = "gradient",
) -> list:
"""
Deploy nodes randomly in the area.
@@ -37,6 +38,7 @@ def deploy_nodes(
num_nodes: Number of nodes (default from config)
area_size: Area size (default from config)
metrics_collector: Metrics collector for observability
routing_type: Type of routing ("gradient", "flooding", "random")
Returns:
List of Node objects
@@ -60,6 +62,7 @@ def deploy_nodes(
channel=channel,
is_sink=True,
metrics_collector=metrics_collector,
routing_type=routing_type,
)
nodes.append(sink)
@@ -75,6 +78,7 @@ def deploy_nodes(
y=y,
channel=channel,
metrics_collector=metrics_collector,
routing_type=routing_type,
)
nodes.append(node)
@@ -105,6 +109,7 @@ def run_simulation(
area_size: float = None,
sim_time: float = None,
seed: int = None,
routing_type: str = "gradient",
) -> dict:
"""
Run the LoRa network simulation.
@@ -114,6 +119,7 @@ def run_simulation(
area_size: Area size in meters
sim_time: Simulation time in seconds
seed: Random seed for reproducibility
routing_type: Type of routing ("gradient", "flooding", "random")
Returns:
Simulation results including metrics
@@ -140,7 +146,7 @@ def run_simulation(
if sim_time is None:
sim_time = config.SIM_TIME
nodes = deploy_nodes(env, channel, num_nodes, area_size, metrics)
nodes = deploy_nodes(env, channel, num_nodes, area_size, metrics, routing_type)
# Setup receive callbacks
setup_receive_callback(nodes, channel)
@@ -175,6 +181,21 @@ def run_simulation(
metrics.add_collision(channel.collision_count - initial_collisions)
# Get efficiency metrics from channel
efficiency = channel.get_efficiency_metrics()
# Calculate derived efficiency metrics
total_tx = efficiency["total_transmissions"]
total_received = len(metrics.metrics.received_packet_ids)
# TX cost: transmissions per successful delivery
tx_per_success = total_tx / total_received if total_received > 0 else float("inf")
# Airtime usage: percentage of simulation time
airtime_usage = (
(efficiency["total_airtime"] / sim_time * 100) if sim_time > 0 else 0
)
# Get results
results = {
"config": {
@@ -182,8 +203,20 @@ def run_simulation(
"area_size": area_size,
"sim_time": sim_time,
"seed": seed,
"routing_type": routing_type,
},
"metrics": metrics.get_metrics().get_summary(),
"efficiency": {
"total_transmissions": total_tx,
"total_airtime": round(efficiency["total_airtime"], 3),
"airtime_usage_percent": round(airtime_usage, 2),
"tx_per_success": round(tx_per_success, 2)
if tx_per_success != float("inf")
else -1,
"hello_transmissions": efficiency["hello_transmissions"],
"data_transmissions": efficiency["data_transmissions"],
"ack_transmissions": efficiency["ack_transmissions"],
},
"topology": [],
}
@@ -219,6 +252,7 @@ def main():
f"Area: {results['config']['area_size']}m x {results['config']['area_size']}m"
)
print(f"Simulation time: {results['config']['sim_time']}s")
print(f"Routing: {results['config']['routing_type']}")
print("\n--- Metrics ---")
metrics = results["metrics"]
@@ -226,10 +260,16 @@ def main():
print(f"Total received: {metrics['total_received']}")
print(f"Packet Delivery Ratio: {metrics['pdr']}%")
print(f"Average hops: {metrics['avg_hop']}")
print(f"Average retries: {metrics['avg_retries']}")
print(f"Convergence time: {metrics['convergence_time']}s")
print(f"Collisions: {metrics['collisions']}")
print("\n--- Efficiency Metrics ---")
eff = results["efficiency"]
print(f"Total transmissions: {eff['total_transmissions']}")
print(f"Total airtime: {eff['total_airtime']:.3f}s")
print(f"Airtime usage: {eff['airtime_usage_percent']:.2f}%")
print(f"TX per success: {eff['tx_per_success']}")
print("\n--- Topology ---")
for node_info in results["topology"]:
parent_str = (

View File

@@ -13,13 +13,39 @@ from typing import Optional
from dataclasses import dataclass
from sim.core.packet import Packet, PacketType
from sim.routing.gradient_routing import GradientRouting
from sim.routing import (
GradientRouting,
FloodingRouting,
RandomForwardRouting,
BROADCAST,
)
from sim.mac.reliable_mac import ReliableMAC
from sim.radio.channel import Channel, ReceivedPacket
from sim.core.metrics import MetricsCollector
from sim import config
def create_routing(node_id: int, is_sink: bool, routing_type: str = "gradient"):
    """
    Factory for routing protocol instances.

    Args:
        node_id: Node ID
        is_sink: Whether this node is the sink
        routing_type: One of "gradient", "flooding", "random"
            (case-insensitive); anything else falls back to gradient.

    Returns:
        Routing protocol instance
    """
    protocols = {
        "flooding": FloodingRouting,
        "random": RandomForwardRouting,
    }
    # Unknown types deliberately fall back to gradient routing.
    protocol_cls = protocols.get(routing_type.lower(), GradientRouting)
    return protocol_cls(node_id, is_sink)
@dataclass
class NodeStats:
"""Node statistics."""
@@ -53,6 +79,7 @@ class Node:
channel: Channel,
is_sink: bool = False,
metrics_collector: MetricsCollector = None,
routing_type: str = "gradient",
):
"""
Initialize node.
@@ -65,6 +92,7 @@ class Node:
channel: Wireless channel
is_sink: Whether this is the sink node
metrics_collector: Metrics collector for observability
routing_type: Type of routing ("gradient", "flooding", "random")
"""
self.env = env
self.node_id = node_id
@@ -76,11 +104,14 @@ class Node:
# Metrics collector for hop tracking
self.metrics_collector = metrics_collector
# Routing type
self.routing_type = routing_type
# Register position with channel
self.channel.register_node(node_id, x, y)
# Layers
self.routing = GradientRouting(node_id, is_sink)
# Layers - use factory to create routing
self.routing = create_routing(node_id, is_sink, routing_type)
self.mac = ReliableMAC(env, node_id)
# Sequence numbers
@@ -208,7 +239,6 @@ class Node:
# If we're the sink, receive the packet
if self.is_sink:
self.stats.data_received += 1
# print(f"[DEBUG] Sink received DATA from node {packet.src}, hop={packet.hop}, seq={packet.seq}")
# Record unique packet received (for PDR)
if self.metrics_collector:
@@ -219,16 +249,36 @@ class Node:
self._send_ack(packet.src, packet.seq)
return
# If not sink, check if we should forward
# Don't forward if we've already forwarded this packet (check path)
if self.node_id in packet.path:
# We've already seen and forwarded this packet, skip it
return
# For flooding: check if we've seen this packet before
if hasattr(self.routing, "should_forward"):
if not self.routing.should_forward(packet):
return # Already forwarded
# Forward to parent
next_hop = self.routing.get_next_hop()
if next_hop is not None and next_hop != self.node_id:
self._forward_data(packet)
# Get next hop and handle flooding
if hasattr(self.routing, "get_next_hop"):
next_hop = self.routing.get_next_hop(packet)
# Handle flooding (BROADCAST)
if next_hop == BROADCAST:
# Forward to all neighbors
neighbors = self.routing.get_all_neighbors()
for neighbor in neighbors:
if neighbor != packet.src: # Don't send back to sender
self._forward_data_to_neighbor(packet, neighbor)
return
# Handle regular unicast routing
if next_hop is not None and next_hop != self.node_id:
self._forward_data(packet)
def _forward_data_to_neighbor(self, packet: Packet, neighbor: int):
    """Forward a data packet to a specific neighbor (used by flooding).

    Args:
        packet: DATA packet being flooded.
        neighbor: Destination neighbor node ID.
    """
    # Record this node in the path and increment hop count BEFORE queueing,
    # so the receiver sees the updated path/hop.
    packet.add_hop(self.node_id)
    # Hand the packet to the MAC layer addressed to this one neighbor.
    self.mac.enqueue(packet, neighbor)
    self.stats.data_forwarded += 1
def _send_ack(self, dst: int, seq: int):
"""Send ACK packet to destination."""
@@ -261,9 +311,15 @@ class Node:
self.data_seq += 1
self.stats.data_sent += 1
# Send to parent
next_hop = self.routing.get_next_hop()
if next_hop is not None:
# Get next hop and send
next_hop = self.routing.get_next_hop(packet)
# Handle flooding (broadcast to all)
if next_hop == BROADCAST:
neighbors = self.routing.get_all_neighbors()
for neighbor in neighbors:
self.mac.enqueue(packet, neighbor)
elif next_hop is not None:
self.mac.enqueue(packet, next_hop)
def _forward_data(self, packet: Packet):
@@ -271,22 +327,26 @@ class Node:
# Record this node in the path and increment hop count
packet.add_hop(self.node_id)
# Send to parent
next_hop = self.routing.get_next_hop()
if next_hop is not None:
# Get next hop
next_hop = self.routing.get_next_hop(packet)
# Handle flooding
if next_hop == BROADCAST:
neighbors = self.routing.get_all_neighbors()
for neighbor in neighbors:
if neighbor != packet.src:
self._forward_data_to_neighbor(packet, neighbor)
elif next_hop is not None:
self.mac.enqueue(packet, next_hop)
self.stats.data_forwarded += 1
def _check_forward(self):
    """Check if there's data to forward.

    Currently a no-op: forwarding is driven entirely by the MAC queue.
    """
    # In a more complex implementation, nodes might buffer data
    # For now, we rely on the MAC queue
    pass
def _check_convergence(self):
    """Check if routing has converged.

    Fires at most once: the first time the routing layer reports a valid
    route, mark this node converged and succeed the `converged` event
    (presumably a simpy event — succeed() may only be called once).
    """
    if not self._converged:
        # For now, just signal that we have a route
        if self.routing.is_route_valid():
            self._converged = True
            self.converged.succeed()
@@ -294,15 +354,11 @@ class Node:
def mac_task(self):
"""
MAC layer task - handles sending queue and retries.
Simplified: No ACK waiting for DATA packets to improve throughput.
ACK is still sent from sink but sender doesn't wait for it.
This is more realistic for LoRa mesh where end-to-end ACK is problematic.
"""
while True:
# Check if there's something to send
if self.mac.has_pending():
# Get next packet
item = self.mac.dequeue()
if item:
packet, dst = item
@@ -315,25 +371,11 @@ class Node:
self.channel.transmit(packet, self.node_id)
self.mac.record_send()
# For DATA packets, we don't wait for ACK
# This is a simplification - in production, you'd want some form of
# local ACK or implicit reliability through lower layers
# The packet is either received or lost - no retry for simplicity
pass
# Small wait to prevent busy loop
yield self.env.timeout(0.1)
def send_packet(self, packet: Packet, dst: int):
"""
Send a packet (called by upper layers).
Corresponds to STM32's Radio.Send.
Args:
packet: Packet to send
dst: Destination node ID
"""
"""Send a packet (called by upper layers)."""
self.channel.transmit(packet, self.node_id)
def get_stats(self) -> dict:

View File

@@ -5,6 +5,7 @@ Implements:
- Broadcast propagation to all nodes in range
- Airtime occupation tracking
- Collision detection (time overlap + |RSSI1 - RSSI2| < 6 dB)
- Transmission statistics for efficiency analysis
"""
import simpy
@@ -25,6 +26,7 @@ class Transmission:
end_time: float
rssi: float
channel_busy_until: float
airtime: float = 0.0 # Add airtime field
@dataclass
@@ -46,6 +48,7 @@ class Channel:
- Transmissions and their time slots
- Collision detection based on time overlap and RSSI difference
- Packet delivery to nodes within range
- Transmission statistics for efficiency analysis
"""
COLLISION_RSSI_DIFF_DB = 6.0 # RSSI difference threshold for collision
@@ -61,6 +64,13 @@ class Channel:
self.transmissions: List[Transmission] = []
self.collision_count = 0
# Efficiency metrics
self.total_transmissions = 0
self.total_airtime = 0.0
self.hello_transmissions = 0
self.data_transmissions = 0
self.ack_transmissions = 0
# Callback for packet reception (set by node manager)
self.receive_callback: Optional[Callable[[int, ReceivedPacket], None]] = None
@@ -85,11 +95,18 @@ class Channel:
# Calculate packet size and airtime
if packet.is_hello:
pkt_airtime = airtime_calc.get_hello_airtime()
self.hello_transmissions += 1
elif packet.is_ack:
pkt_airtime = airtime_calc.get_ack_airtime()
self.ack_transmissions += 1
else: # DATA
payload_size = len(packet.payload) if packet.payload else 16
pkt_airtime = airtime_calc.get_data_airtime(payload_size)
self.data_transmissions += 1
# Track transmission statistics
self.total_transmissions += 1
self.total_airtime += pkt_airtime
start_time = self.env.now
end_time = start_time + pkt_airtime
@@ -115,6 +132,7 @@ class Channel:
end_time=end_time,
rssi=sender_rssi,
channel_busy_until=end_time,
airtime=pkt_airtime,
)
if colliding:
@@ -253,7 +271,22 @@ class Channel:
return self.env.now
return max(t.channel_busy_until for t in self.transmissions)
def get_efficiency_metrics(self) -> dict:
    """Return the transmission/airtime counters for efficiency analysis."""
    # The dict keys intentionally mirror the attribute names.
    counter_names = (
        "total_transmissions",
        "total_airtime",
        "hello_transmissions",
        "data_transmissions",
        "ack_transmissions",
    )
    return {name: getattr(self, name) for name in counter_names}
def reset(self):
    """Reset channel state: drop in-flight transmissions, zero all counters."""
    self.transmissions.clear()
    # Zero every integer counter, including the efficiency statistics.
    for counter in (
        "collision_count",
        "total_transmissions",
        "hello_transmissions",
        "data_transmissions",
        "ack_transmissions",
    ):
        setattr(self, counter, 0)
    self.total_airtime = 0.0

View File

@@ -1,5 +1,7 @@
"""Routing module."""
from sim.routing.gradient_routing import GradientRouting
from sim.routing.flooding import FloodingRouting, BROADCAST
from sim.routing.random_forward import RandomForwardRouting
__all__ = ["GradientRouting"]
__all__ = ["GradientRouting", "FloodingRouting", "RandomForwardRouting", "BROADCAST"]

188
sim/routing/flooding.py Normal file
View File

@@ -0,0 +1,188 @@
"""
Flooding-based routing protocol.
Simple flooding: when a node receives a DATA packet it hasn't seen,
it forwards to ALL neighbors (except the sender).
This is a baseline algorithm for comparison with gradient routing.
"""
import random
from typing import Dict, Optional, Set
from dataclasses import dataclass, field
from sim.core.packet import Packet, PacketType
from sim import config
# Sentinel next-hop value: forward to ALL neighbors instead of a single node.
BROADCAST = -1
@dataclass
class NeighborInfo:
    """Information about a neighbor node."""
    # Neighbor's node ID.
    node_id: int
    # RSSI of the most recent HELLO heard from this neighbor.
    rssi: float
    # NOTE(review): callers currently store the RSSI here rather than a
    # timestamp — confirm before relying on time-based staleness checks.
    last_hello_time: float
class FloodingRouting:
    """
    Flooding routing protocol (baseline for comparison with gradient routing).

    Each node maintains:
    - seen_packets: set of (src, seq) tuples to prevent duplicate forwarding
    - neighbors: dict of known neighbors

    Forwarding logic:
    - If a packet has not been seen before, forward it to ALL neighbors.
    - The seen-packet cache is bounded; the oldest entries are evicted first.
    """

    def __init__(self, node_id: int, is_sink: bool = False):
        """
        Initialize routing.

        Args:
            node_id: This node's ID
            is_sink: Whether this node is the sink
        """
        self.node_id = node_id
        self.is_sink = is_sink

        # Routing state (parent is unused in flooding, kept for API parity)
        self.parent: Optional[int] = None
        self.neighbors: Dict[int, NeighborInfo] = {}

        # Flooding state: dedupe cache plus insertion order for eviction.
        self.seen_packets: Set[tuple] = set()
        self._seen_order: list = []  # (src, seq) in arrival order, oldest first
        self.max_seen_cache = 1000  # Limit cache size

        # Sequence number for HELLO messages
        self.hello_seq = 0

        # Cost (for compatibility with metrics)
        self.cost = 0 if is_sink else 1

    def reset(self):
        """Reset routing state."""
        self.seen_packets.clear()
        self._seen_order.clear()
        self.neighbors.clear()
        self.hello_seq = 0
        self.cost = 0 if self.is_sink else 1

    def create_hello_packet(self) -> "Packet":
        """
        Create a HELLO packet for neighbor discovery.

        Returns:
            Broadcast HELLO packet carrying this node's ID as payload.
        """
        packet = Packet(
            type=PacketType.HELLO,
            src=self.node_id,
            dst=-1,  # Broadcast
            seq=self.hello_seq,
            hop=0,
            payload=str(self.node_id),  # Just send our ID
        )
        self.hello_seq += 1
        return packet

    def process_hello(self, packet: "Packet", rssi: float, current_time: float = None) -> bool:
        """
        Process a received HELLO packet: track the sender as a neighbor.

        For flooding there is no cost calculation — only neighbor tracking.

        Args:
            packet: Received HELLO packet
            rssi: RSSI of received signal
            current_time: Optional reception time. When provided it is stored
                as ``last_hello_time`` so cleanup_stale_neighbors() works as
                intended; when omitted, the legacy behavior of storing the
                RSSI is kept for backward compatibility.

        Returns:
            True if the neighbor list changed (a new neighbor appeared)
        """
        old_count = len(self.neighbors)
        self.neighbors[packet.src] = NeighborInfo(
            node_id=packet.src,
            rssi=rssi,
            # BUG(legacy): without current_time this stores an RSSI in a
            # time field, which breaks staleness checks downstream.
            last_hello_time=rssi if current_time is None else current_time,
        )
        return len(self.neighbors) != old_count

    def get_next_hop(self, packet: "Packet" = None) -> int:
        """
        Get the next hop for forwarding.

        For flooding this is always the BROADCAST sentinel, signalling the
        caller to forward to all neighbors.

        Args:
            packet: The packet to forward (unused; kept for API parity)

        Returns:
            BROADCAST (-1)
        """
        return BROADCAST

    def should_forward(self, packet: "Packet") -> bool:
        """
        Check whether this packet should be forwarded (i.e. not seen before).

        Side effect: records the packet in the seen cache.

        Args:
            packet: The packet to check

        Returns:
            True if the packet has not been seen before
        """
        packet_id = (packet.src, packet.seq)
        if packet_id in self.seen_packets:
            return False

        self.seen_packets.add(packet_id)
        self._seen_order.append(packet_id)

        # Evict the genuinely oldest entries when the cache overflows.
        # (Previously a set was sliced, which dropped ARBITRARY entries
        # because sets have no insertion order.)
        if len(self._seen_order) > self.max_seen_cache:
            overflow = len(self._seen_order) - self.max_seen_cache + 100
            for old_id in self._seen_order[:overflow]:
                self.seen_packets.discard(old_id)
            del self._seen_order[:overflow]
        return True

    def get_all_neighbors(self) -> list:
        """Get list of all neighbor IDs."""
        return list(self.neighbors.keys())

    def is_route_valid(self) -> bool:
        """Flooding can forward whenever at least one neighbor is known."""
        return len(self.neighbors) > 0

    def cleanup_stale_neighbors(self, current_time: float, timeout: float = 30.0):
        """Remove neighbors whose last HELLO is older than ``timeout`` seconds.

        Only meaningful when process_hello() was called with current_time;
        with the legacy RSSI-as-time storage this comparison is unreliable.
        """
        stale = [
            nid
            for nid, info in self.neighbors.items()
            if current_time - info.last_hello_time > timeout
        ]
        for nid in stale:
            del self.neighbors[nid]

    def get_routing_table(self) -> dict:
        """Get routing table for debugging/visualization."""
        return {
            "node_id": self.node_id,
            "is_sink": self.is_sink,
            "cost": self.cost,
            "parent": self.parent,
            "neighbors": {
                nid: {"rssi": round(info.rssi, 2)}
                for nid, info in self.neighbors.items()
            },
            "algorithm": "flooding",
        }

View File

@@ -109,7 +109,7 @@ class GradientRouting:
node_id=packet.src,
cost=neighbor_cost,
rssi=rssi,
last_hello_time=packet.rssi, # Use rssi field to store time
last_hello_time=rssi, # Use rssi field to store time
)
# Check if we should update our route
@@ -129,10 +129,13 @@ class GradientRouting:
return old_cost != self.cost
def get_next_hop(self) -> Optional[int]:
def get_next_hop(self, packet: Packet = None) -> Optional[int]:
"""
Get next hop towards sink.
Args:
packet: Optional packet (for compatibility)
Returns:
Parent node ID, or None if no route
"""

View File

@@ -0,0 +1,148 @@
"""
Random forwarding routing protocol.
Baseline algorithm: randomly select ONE neighbor to forward to.
This demonstrates the value of gradient-based routing.
"""
import random
from typing import Dict, Optional
from dataclasses import dataclass
from sim.core.packet import Packet, PacketType
from sim import config
@dataclass
class NeighborInfo:
    """Information about a neighbor node."""
    # Neighbor's node ID.
    node_id: int
    # RSSI of the most recent HELLO heard from this neighbor.
    rssi: float
    # NOTE(review): process_hello currently stores the RSSI here rather than
    # a timestamp — verify before trusting time-based cleanup logic.
    last_hello_time: float
class RandomForwardRouting:
    """
    Random forwarding routing protocol.

    Baseline algorithm: for every packet, forward to ONE uniformly random
    neighbor. This demonstrates the value of gradient-based routing.
    """

    def __init__(self, node_id: int, is_sink: bool = False):
        """
        Initialize routing.

        Args:
            node_id: This node's ID
            is_sink: Whether this node is the sink
        """
        self.node_id = node_id
        self.is_sink = is_sink

        # Routing state; parent is unused (a neighbor is drawn per packet)
        self.parent: Optional[int] = None
        self.neighbors: Dict[int, NeighborInfo] = {}

        # Sequence number for HELLO messages
        self.hello_seq = 0

        # Cost (for compatibility with metrics)
        self.cost = 0 if is_sink else 1

    def reset(self):
        """Reset routing state."""
        self.neighbors.clear()
        self.hello_seq = 0
        self.cost = 0 if self.is_sink else 1
        self.parent = None

    def create_hello_packet(self) -> "Packet":
        """
        Create a HELLO packet for neighbor discovery.

        Returns:
            Broadcast HELLO packet carrying this node's ID as payload.
        """
        packet = Packet(
            type=PacketType.HELLO,
            src=self.node_id,
            dst=-1,  # Broadcast
            seq=self.hello_seq,
            hop=0,
            payload=str(self.node_id),
        )
        self.hello_seq += 1
        return packet

    def process_hello(self, packet: "Packet", rssi: float, current_time: float = None) -> bool:
        """
        Process a received HELLO packet: track the sender as a neighbor.

        Args:
            packet: Received HELLO packet
            rssi: RSSI of received signal
            current_time: Optional reception time. When provided it is stored
                as ``last_hello_time`` so cleanup_stale_neighbors() works as
                intended; when omitted, the legacy behavior of storing the
                RSSI is kept for backward compatibility.

        Returns:
            True if the neighbor list changed (a new neighbor appeared)
        """
        old_count = len(self.neighbors)
        self.neighbors[packet.src] = NeighborInfo(
            node_id=packet.src,
            rssi=rssi,
            # BUG(legacy): without current_time this stores an RSSI in a
            # time field, which breaks staleness checks downstream.
            last_hello_time=rssi if current_time is None else current_time,
        )
        return len(self.neighbors) != old_count

    def get_next_hop(self, packet: "Packet" = None) -> Optional[int]:
        """
        Randomly select ONE neighbor to forward to.

        Args:
            packet: The packet to forward (unused in random routing)

        Returns:
            A uniformly random neighbor ID, or None if there are no neighbors
        """
        if not self.neighbors:
            return None
        return random.choice(list(self.neighbors.keys()))

    def is_route_valid(self) -> bool:
        """A route exists whenever at least one neighbor is known."""
        return len(self.neighbors) > 0

    def cleanup_stale_neighbors(self, current_time: float, timeout: float = 30.0):
        """Remove neighbors whose last HELLO is older than ``timeout`` seconds.

        Only meaningful when process_hello() was called with current_time;
        with the legacy RSSI-as-time storage this comparison is unreliable.
        """
        stale = [
            nid
            for nid, info in self.neighbors.items()
            if current_time - info.last_hello_time > timeout
        ]
        for nid in stale:
            del self.neighbors[nid]

    def get_routing_table(self) -> dict:
        """Get routing table for debugging/visualization."""
        return {
            "node_id": self.node_id,
            "is_sink": self.is_sink,
            "cost": self.cost,
            "parent": self.parent,
            "neighbors": {
                nid: {"rssi": round(info.rssi, 2)}
                for nid, info in self.neighbors.items()
            },
            "algorithm": "random_forward",
        }

View File

@@ -0,0 +1,86 @@
"""
Test: Algorithm Comparison
Verify that gradient routing outperforms baseline algorithms.
"""
import pytest
from sim.experiments.runner import run_single_experiment
@pytest.fixture
def seed():
    """Fixed random seed shared by all tests for reproducibility."""
    return 42
def test_gradient_outperforms_random(seed):
    """Gradient routing's PDR should not fall meaningfully below random's."""
    common = dict(node_count=12, area_size=800, sim_time=100, seed=seed)
    gradient = run_single_experiment(routing="gradient", **common)
    random_result = run_single_experiment(routing="random", **common)

    print(f"\nGradient PDR: {gradient['pdr']:.2f}%")
    print(f"Random PDR: {random_result['pdr']:.2f}%")

    # Allow a 5-point tolerance for stochastic variation.
    assert gradient["pdr"] >= random_result["pdr"] - 5.0, (
        f"Gradient ({gradient['pdr']}%) should outperform Random ({random_result['pdr']}%)"
    )
def test_all_algorithms_run(seed):
    """Every routing algorithm should complete and yield sane metrics."""
    for algorithm in ("gradient", "flooding", "random"):
        outcome = run_single_experiment(
            routing=algorithm,
            node_count=10,
            area_size=600,
            sim_time=100,
            seed=seed,
        )
        # Sanity bounds only — this is a smoke test, not a performance test.
        assert outcome["pdr"] >= 0, f"{algorithm} should produce valid PDR"
        assert outcome["max_hop"] >= 0, f"{algorithm} should produce valid max_hop"
def test_flooding_produces_more_hops(seed):
    """Flooding's broadcast nature should yield at least as many hops."""
    common = dict(node_count=12, area_size=800, sim_time=100, seed=seed)
    gradient = run_single_experiment(routing="gradient", **common)
    flooding = run_single_experiment(routing="flooding", **common)

    print(f"\nGradient max_hop: {gradient['max_hop']}")
    print(f"Flooding max_hop: {flooding['max_hop']}")

    # Multi-path forwarding means flooding's longest path should dominate.
    assert flooding["max_hop"] >= gradient["max_hop"], (
        "Flooding should produce more hops than gradient routing"
    )
if __name__ == "__main__":
    # Allow running this test module directly: verbose, with prints shown.
    pytest.main([__file__, "-v", "-s"])